| prompt (stringlengths 19-879k) | completion (stringlengths 3-53.8k) | api (stringlengths 8-59) |
|---|---|---|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# load tensorflow and keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models, layers, optimizers, datasets
from tensorflow.keras.layers.experimental import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, roc_curve, auc
from sklearn.model_selection import cross_val_score, KFold, StratifiedKFold
from sklearn.inspection import permutation_importance
from sklearn.metrics import precision_recall_curve, f1_score
#helper libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Make numpy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)
print(tf.__version__)
# In[ ]:
#Plotting function
def plot_history(model_history, model_name):
fig = plt.figure(figsize=(15,5), facecolor='w')
ax = fig.add_subplot(121)
ax.plot(model_history.history['loss'])
ax.plot(model_history.history['val_loss'])
ax.set(title=model_name + ': Model loss', ylabel='Loss', xlabel='Epoch')
ax.legend(['Train', 'Test'], loc='upper right')
ax = fig.add_subplot(122)
ax.plot(np.log(model_history.history['loss']))
ax.plot(np.log(model_history.history['val_loss']))
ax.set(title=model_name + ': Log model loss', ylabel='Log loss', xlabel='Epoch')
ax.legend(['Train', 'Test'], loc='upper right')
plt.show()
plt.close()
# In[ ]:
# Define the cross-validator object for regression, which inherits from
# StratifiedKFold, overwriting the split method
#code source: https://colab.research.google.com/drive/1KnXujsQDvLZOgCRg_iis036cwffwZM2_?usp=sharing#scrollTo=2q_q9w8Jpmwd
# &https://github.com/scikit-learn/scikit-learn/issues/4757
class StratifiedKFoldReg(StratifiedKFold):
"""
This class generates cross-validation partitions
for regression setups, such that these partitions
resemble the original sample distribution of the
target variable.
"""
def split(self, X, y, groups=None):
n_samples = len(y)
# Number of labels used to discretize the target variable
# into bins of quasi-equal size
n_labels = int(np.round(n_samples/self.n_splits))
# Assign a label to each bin of n_splits points
y_labels_sorted = np.concatenate([np.repeat(ii, self.n_splits) for ii in range(n_labels)])
# Get number of points that would fall
# out of the equally-sized bins
mod = np.mod(n_samples, self.n_splits)
# Find the idxs of the first occurrence of each unique label
_, labels_idx = np.unique(y_labels_sorted, return_index=True)
# randomly sample the label idxs to which the leftover
# (mod) points will be assigned
rand_label_ix = np.random.choice(labels_idx, mod, replace=False)
# insert these at the beginning of the corresponding bin
y_labels_sorted = np.insert(y_labels_sorted, rand_label_ix, y_labels_sorted[rand_label_ix])
# map each element of y to its corresponding label in the sorted
# array of labels
map_labels_y = dict()
for ix, label in zip(np.argsort(y), y_labels_sorted):
map_labels_y[ix] = label
# reorder the labels to match the original order of y
y_labels = np.array([map_labels_y[ii] for ii in range(n_samples)])
return super().split(X, y_labels, groups)
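# Minimal illustrative check of StratifiedKFoldReg (demo-only synthetic data,
# not used by the model below): with a skewed target, every fold's target mean
# should stay close to the overall mean, which is the point of stratifying.
demo_y = np.random.RandomState(0).exponential(scale=2.0, size=100)
demo_X = np.arange(100).reshape(-1, 1)
demo_cv = StratifiedKFoldReg(n_splits=5, shuffle=True, random_state=0)
for demo_train_ix, demo_test_ix in demo_cv.split(demo_X, demo_y):
    print(f'fold target mean: {demo_y[demo_test_ix].mean():.3f} (overall {demo_y.mean():.3f})')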
# In[ ]:
#load data
raw_dataset = pd.read_csv('.spyder-py3/merged_datasets_for_simeval.csv')
dataset = raw_dataset.copy()
dataset.drop(['dofv', 'ID', 'Study_ID', 'Model_number', 'lin_model'], axis = 1, inplace=True)
dataset.head()
# In[ ]:
#split features and labels
x = dataset.copy()
y = x.pop('residual')
# In[ ]:
# normalization
# scaler = StandardScaler().fit(x)
# X = scaler.transform(x)
X = x.values # normalization is handled by the model's Normalization layer below
# In[ ]:
##################################################################################################################################################
# In[ ]:
#ANN5: This is the final model
n_splits = 10 #number of folds
loss_per_fold = [] #to store test loss value in each fold
Train_loss_per_fold = [] #to store training loss value in each fold
predicted_y = np.array([]) #to store predicted residual values from each CV fold
true_y = np.array([]) #to store true residual value from each CV fold
cv_stratified = StratifiedKFoldReg(n_splits=n_splits, shuffle=True, random_state=10) # Stratified CV
fold_no = 1
for ii, (train_index, test_index) in enumerate(cv_stratified.split(X, y)):
y_train, y_test = y[train_index], y[test_index]
X_train, X_test = X[train_index], X[test_index]
#Define and summarize the model
inps = layers.Input(shape=X_train[0].shape)
norm_layer = layers.Normalization(axis=1)
norm_layer.adapt(X_train)
x = norm_layer(inps)
x = layers.Dense(48, activation='relu')(x)
x = layers.Dense(24, activation='relu')(x)
x = layers.Dense(12, activation='relu')(x)
x = layers.Dropout(0.2)(x)
preds = layers.Dense(1)(x)
ANN5 = models.Model(inputs=inps, outputs=preds)
#Compile the model
lr = 0.00007
ANN5.compile(optimizer=optimizers.RMSprop(learning_rate=lr), loss='mse')
# Generate a print
print('------------------------------------------------------------------------')
print(f'Training for fold {fold_no} ...')
test_labels = y_test.to_list()
test_labels = [round(num, 2) for num in test_labels]
print(test_labels) #to have a look at the true residual values for test dataset
#plot histograms of y_train and y_test
fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
axs.hist(y_train, label="training")
axs.hist(y_test, label="test")
axs.legend()
plt.tight_layout()
# Fit data to model
history = ANN5.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
plot_history(history, 'ANN5')
#to store values for plotting global predicted vs. true residual values
test_predictions = ANN5.predict(X_test).flatten()
predicted_y = np.append(predicted_y, test_predictions)
y_test_array = y_test.values
true_y = np.append(true_y, y_test_array)
# Generate generalization metrics
scores = ANN5.evaluate(X_test, y_test, verbose=0)
print(f'Test Score for fold {fold_no}: {ANN5.metrics_names} of {scores}')
scores_training = ANN5.evaluate(X_train, y_train, verbose=0)
print(f'Training Score for fold {fold_no}: {ANN5.metrics_names} of {scores_training}')
loss_per_fold.append(scores)
Train_loss_per_fold.append(scores_training)
# Increase fold number
fold_no = fold_no + 1
# global plot true vs. predicted
a = plt.axes(aspect='equal')
plt.scatter(predicted_y, true_y)
plt.xlabel('Predictions [residual]')
plt.ylabel('True Values [residual]')
lims = [-5, 20]
plt.xlim(lims)
plt.ylim(lims)
prediction_plot = plt.plot(lims, lims)
# In[ ]:
# == Provide average scores ==
print('------------------------------------------------------------------------')
print('Score per fold')
for i in range(0, len(loss_per_fold)):
print('------------------------------------------------------------------------')
print(f'> Fold {i+1} - Training Loss: {Train_loss_per_fold[i]} - Testing Loss: {loss_per_fold[i]} -')
print('------------------------------------------------------------------------')
print('Average scores for all folds:')
print(f'> Test Loss: {np.mean(loss_per_fold)}')
print(f'> Training Loss: {np.mean(Train_loss_per_fold)}')
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import src.ticTacToeGenerator
def main():
tttGameBoard = src.ticTacToeGenerator.GameBoard()
feature_columns = [tf.feature_column.numeric_column("x", shape=[9])]
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns,
model_dir="/tmp/ttt_model_regressor")
# Generate random game data to train on
tttGameBoard.generate_100_games_random()
board_train, board_test, result_train, result_test = train_test_split(tttGameBoard.overallHistory,
tttGameBoard.overallResults,
test_size=0.20)
input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(board_train)},
y=np.array(result_train),
num_epochs=None,
shuffle=True)
estimator.train(input_fn=input_fn, steps=100)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(board_train)},
y=np.array(result_train),
batch_size=4,
num_epochs=100,
shuffle=False)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(board_test)},
y=np.array(result_test),
batch_size=4,
num_epochs=100,
shuffle=False)
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=test_input_fn)
print("train metrics: %r" % train_metrics)
print("eval metrics: %r" % eval_metrics)
player_one_wins, player_two_wins, ties = 0, 0, 0
for x in range(len(tttGameBoard.winnerResults)):
if tttGameBoard.winnerResults[x] == tttGameBoard.playerOneMark:
player_one_wins += 1
elif tttGameBoard.winnerResults[x] == tttGameBoard.playerTwoMark:
player_two_wins += 1
else:
ties += 1
print("Player One Wins: " + str(player_one_wins) +
" Player Two Wins: " + str(player_two_wins) +
" Ties: " + str(ties))
# Generate random data to train again
tttGameBoard.clear_history()
tttGameBoard.generate_100_games_predict_player_one_random_player_two(estimator)
board_train, board_test, result_train, result_test = train_test_split(tttGameBoard.overallHistory,
tttGameBoard.overallResults,
test_size=0.20)
input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(board_train)},
y=np.array(result_train),
num_epochs=None,
shuffle=True)
estimator.train(input_fn=input_fn, steps=100)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(board_train)},
y=np.array(result_train),
batch_size=4,
num_epochs=100,
shuffle=False)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": np.array(board_test)},
y=np.array(result_test),
batch_size=4,
num_epochs=100,
shuffle=False)
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.rgb.deprecated` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from itertools import permutations
from colour.models.rgb.deprecated import (RGB_to_HSV, HSV_to_RGB, RGB_to_HSL,
HSL_to_RGB, RGB_to_CMY, CMY_to_RGB,
CMY_to_CMYK, CMYK_to_CMY)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestRGB_to_HSV', 'TestHSV_to_RGB', 'TestRGB_to_HSL', 'TestHSL_to_RGB',
'TestRGB_to_CMY', 'TestCMY_to_RGB', 'TestCMY_to_CMYK', 'TestCMYK_to_CMY'
]
class TestRGB_to_HSV(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.deprecated.RGB_to_HSV` definition unit
tests methods.
"""
def test_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.deprecated.RGB_to_HSV` definition.
"""
np.testing.assert_almost_equal(
RGB_to_HSV(np.array([0.45620519, 0.03081071, 0.04091952])),
np.array([0.99603944, 0.93246304, 0.45620519]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSV(np.array([0.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 0.00000000, 0.00000000]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSV(np.array([1.00000000, 1.00000000, 1.00000000])),
np.array([0.00000000, 0.00000000, 1.00000000]),
decimal=7)
def test_n_dimensional_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.deprecated.RGB_to_HSV` definition
n-dimensional arrays support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSV = RGB_to_HSV(RGB)
RGB = np.tile(RGB, (6, 1))
HSV = np.tile(HSV, (6, 1))
np.testing.assert_almost_equal(RGB_to_HSV(RGB), HSV, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
HSV = np.reshape(HSV, (2, 3, 3))
np.testing.assert_almost_equal(RGB_to_HSV(RGB), HSV, decimal=7)
def test_domain_range_scale_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.deprecated.RGB_to_HSV` definition domain
and range scale support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSV = RGB_to_HSV(RGB)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
RGB_to_HSV(RGB * factor), HSV * factor, decimal=7)
@ignore_numpy_errors
def test_nan_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.deprecated.RGB_to_HSV` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
RGB = np.array(case)
RGB_to_HSV(RGB)
class TestHSV_to_RGB(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.deprecated.HSV_to_RGB` definition unit
tests methods.
"""
def test_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.deprecated.HSV_to_RGB` definition.
"""
np.testing.assert_almost_equal(
HSV_to_RGB(np.array([0.99603944, 0.93246304, 0.45620519])),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7)
np.testing.assert_almost_equal(
HSV_to_RGB(np.array([0.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 0.00000000, 0.00000000]),
decimal=7)
np.testing.assert_almost_equal(
HSV_to_RGB(np.array([0.00000000, 0.00000000, 1.00000000])),
np.array([1.00000000, 1.00000000, 1.00000000]),
decimal=7)
def test_n_dimensional_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.deprecated.HSV_to_RGB` definition
n-dimensional arrays support.
"""
HSV = np.array([0.99603944, 0.93246304, 0.45620519])
RGB = HSV_to_RGB(HSV)
HSV = np.tile(HSV, (6, 1))
RGB = np.tile(RGB, (6, 1))
np.testing.assert_almost_equal(HSV_to_RGB(HSV), RGB, decimal=7)
HSV = np.reshape(HSV, (2, 3, 3))
RGB = np.reshape(RGB, (2, 3, 3))
np.testing.assert_almost_equal(HSV_to_RGB(HSV), RGB, decimal=7)
def test_domain_range_scale_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.deprecated.HSV_to_RGB` definition domain
and range scale support.
"""
HSV = np.array([0.99603944, 0.93246304, 0.45620519])
RGB = HSV_to_RGB(HSV)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
HSV_to_RGB(HSV * factor), RGB * factor, decimal=7)
@ignore_numpy_errors
def test_nan_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.deprecated.HSV_to_RGB` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
HSV = np.array(case)
HSV_to_RGB(HSV)
class TestRGB_to_HSL(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.deprecated.RGB_to_HSL` definition unit
tests methods.
"""
def test_RGB_to_HSL(self):
"""
Tests :func:`colour.models.rgb.deprecated.RGB_to_HSL` definition.
"""
np.testing.assert_almost_equal(
RGB_to_HSL(np.array([0.45620519, 0.03081071, 0.04091952])),
np.array([0.99603944, 0.87347144, 0.24350795]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSL(np.array([0.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 0.00000000, 0.00000000]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSL(np.array([1.00000000, 1.00000000, 1.00000000])),
np.array([0.00000000, 0.00000000, 1.00000000]),
decimal=7)
def test_n_dimensional_RGB_to_HSL(self):
"""
Tests :func:`colour.models.rgb.deprecated.RGB_to_HSL` definition
n-dimensional arrays support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSL = RGB_to_HSL(RGB)
RGB = np.tile(RGB, (6, 1))
HSL = np.tile(HSL, (6, 1))
np.testing.assert_almost_equal(RGB_to_HSL(RGB), HSL, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
HSL = np.reshape(HSL, (2, 3, 3))
import param
import numpy as np
from bokeh.models import Patches
from ...core.data import Dataset
from ...core.util import basestring, max_range, dimension_sanitizer
from .graphs import GraphPlot
class SankeyPlot(GraphPlot):
color_index = param.ClassSelector(default=2, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node colors will be drawn""")
label_index = param.ClassSelector(default=2, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node labels will be drawn""")
label_position = param.ObjectSelector(default='right', objects=['left', 'right'],
doc="""
Whether node labels should be placed to the left or right.""")
show_values = param.Boolean(default=True, doc="""
Whether to show the values.""")
node_width = param.Number(default=15, doc="""
Width of the nodes.""")
node_padding = param.Integer(default=10, doc="""
Number of pixels of padding relative to the bounds.""")
iterations = param.Integer(default=32, doc="""
Number of iterations to run the layout algorithm.""")
_style_groups = dict(GraphPlot._style_groups, quad='nodes', text='label')
_draw_order = ['patches', 'quad', 'text']
style_opts = GraphPlot.style_opts + ['edge_fill_alpha', 'nodes_line_color',
'label_text_font_size']
filled = True
def _init_glyphs(self, plot, element, ranges, source):
ret = super(SankeyPlot, self)._init_glyphs(plot, element, ranges, source)
renderer = plot.renderers.pop(plot.renderers.index(self.handles['glyph_renderer']))
plot.renderers = [renderer] + plot.renderers
return ret
def get_data(self, element, ranges, style):
data, mapping, style = super(SankeyPlot, self).get_data(element, ranges, style)
self._compute_quads(element, data, mapping)
style['nodes_line_color'] = 'black'
lidx = element.nodes.get_dimension(self.label_index)
if lidx is None:
if self.label_index is not None:
dims = element.nodes.dimensions()[2:]
self.warning("label_index supplied to Sankey not found, "
"expected one of %s, got %s." %
(dims, self.label_index))
return data, mapping, style
self._compute_labels(element, data, mapping)
self._patch_hover(element, data)
return data, mapping, style
def _compute_quads(self, element, data, mapping):
"""
Computes the node quad glyph data.
"""
quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
quad_data = dict(data['scatter_1'])
quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
for node in element._sankey['nodes']:
quad_data['x0'].append(node['x0'])
quad_data['y0'].append(node['y0'])
quad_data['x1'].append(node['x1'])
quad_data['y1'].append(node['y1'])
data['quad_1'] = quad_data
if 'node_fill_color' in mapping['scatter_1']:
quad_mapping['fill_color'] = mapping['scatter_1']['node_fill_color']
mapping['quad_1'] = quad_mapping
def _compute_labels(self, element, data, mapping):
"""
Computes labels for the nodes and adds it to the data.
"""
lidx = element.nodes.get_dimension(self.label_index)
if element.vdims:
edges = Dataset(element)[element[element.vdims[0].name]>0]
nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
else:
nodes = element
value_dim = element.vdims[0]
labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
if self.show_values:
value_labels = []
for i, node in enumerate(element._sankey['nodes']):
value = value_dim.pprint_value(node['value'])
label = '%s - %s' % (labels[i], value)
if value_dim.unit:
label += ' %s' % value_dim.unit
value_labels.append(label)
labels = value_labels
ys = nodes.dimension_values(1)
nodes = element._sankey['nodes']
offset = (nodes[0]['x1']-nodes[0]['x0'])/4.
if self.label_position == 'right':
xs = np.array([node['x1'] for node in nodes])
'''
File: simLVcircuit.py
Description: simulate circuit with ngspice
History:
Date Programmer SAR# - Description
---------- ---------- ----------------------------
Author: <EMAIL> 08MAR2021 - Created
Author: <EMAIL> 08MAR2021 - v1.0.0
Author: <EMAIL> 13APR2021 - v2.0.0
Author: <EMAIL> 21APR2021 - v2.1.0
Author: <EMAIL> 09JUN2021 - v3.0.0
-added remove "temp_"+lvufilename[:-4] files
Author: <EMAIL> 10JUL2021 - v3.1.2
- debug lvflowrate input run by adding voltage lvgnd2 to gnd as v(lv)
'''
########################################################################
_version='3.1.2'
import logging
logger = logging.getLogger(__name__)
import sys
import vtk
import os
import inspect
from heartFEM import ngspice_py
import numpy as np
from scipy import interpolate
########################################################################
suffixDict={4:'T ',3:'g ',2:'meg',1:'k ',0:' ',-1:'m ',-2:'u ',-3:'n ',-4:'p ',-5:'f '}
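# Illustrative sketch only (the helper below is an assumption for demonstration
# and is not used elsewhere in this module): suffixDict maps a power-of-1000
# exponent to its SPICE engineering suffix, e.g. 7e-5 -> '70u ' in a netlist.
def _to_spice_suffix_demo(value):
    exponent = 0 if value == 0 else int(np.floor(np.log10(abs(value)) / 3))
    exponent = max(-5, min(4, exponent))
    return '{:g}{}'.format(value / (1000.0 ** exponent), suffixDict[exponent])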
def simLVcircuit(casename,stopTime,lvufile,lvinputvar='V',initLAvol=0,initRAvol=0,initLVvol=0,initRVvol=0,vla0=None,vra0=None,init_file=None,init_time=None,additionalVarDict=None,timetopeak_from_to=None,verbose=True):
logger.info('*** simulateLVcircuit ***')
cirfilename = casename + ".cir"
cirtempfilename = casename + "_temp.cir"
cirlogfilename = casename + "_cir.log"
cmd = "cp " + cirfilename + " " + cirtempfilename
os.system(cmd)
cmd = "sed -i.bak s/'<<stopTime>>'/'" + '{:.2f}m'.format(stopTime) + "'/g " + cirtempfilename
os.system(cmd)
if lvinputvar=='i' or lvinputvar=='I':
cmd = "sed -i.bak s/'<<lvinputvar>>'/'I'/g " + cirtempfilename
os.system(cmd)
#cmd = "sed -i.bak s/'Vlvu'/'*Vlvu'/g " + cirtempfilename
#os.system(cmd)
#cmd = "sed -i.bak s/'*Elvu'/'Elvu'/g " + cirtempfilename
#os.system(cmd)
else:
cmd = "sed -i.bak s/'<<lvinputvar>>'/'V'/g " + cirtempfilename
os.system(cmd)
if lvinputvar in ['i','v','I','V']:
cmd = "sed -i.bak s/'<<lvsourcemode>>'/'2'/g " + cirtempfilename
os.system(cmd)
if os.path.isfile(lvufile):
case_dir,lvufilename = os.path.split(lvufile)
else:
data=np.array(lvufile)
case_dir=os.getcwd()
lvufilename='lvufile.txt'
lvufile=os.getcwd()+'/'+lvufilename
np.savetxt(lvufile,data)
if init_file is not None and init_time is not None:
data=np.loadtxt(lvufile)
"""
Base classes used in the `ChiantiPy.core.ion` and `ChiantiPy.core.spectrum`
classes. Mostly printing, plotting and saving routines.
"""
import copy
import time
import numpy as np
import matplotlib.pyplot as plt
import ChiantiPy.tools.util as util
import ChiantiPy.Gui as chGui
import ChiantiPy.tools.data as chdata
class ionTrails(object):
"""
Base class for `ChiantiPy.core.ion` and `ChiantiPy.core.spectrum`
"""
def argCheck(self, temperature=None, eDensity=None, pDensity='default', em = None, verbose=0):
''' Check the compatibility of the three arguments,
convert them to numpy arrays via np.atleast_1d,
and attach them as attributes of the object
'''
if temperature is not None:
self.Temperature = np.atleast_1d(temperature)
if isinstance(self.Temperature[0], str):
raise ValueError(' temperature can not be a string')
if np.any(self.Temperature <= 0.):
raise ValueError(' all temperatures must be positive')
self.Ntemp = self.Temperature.size
else:
raise ValueError('temperature not defined')
if pDensity == 'default':
self.p2eRatio()
if eDensity is not None:
self.EDensity = np.atleast_1d(eDensity)
#! /usr/bin/env python
'''
tensorflow==2.5.0+
python3 openvino2tensorflow.py \
--model_path openvino/448x448/FP32/Resnet34_3inputs_448x448_20200609.xml \
--output_saved_model \
--output_pb \
--output_weight_quant_tflite \
--output_float16_quant_tflite \
--output_no_quant_float32_tflite
python3 openvino2tensorflow.py \
--model_path debug/openvino/yolox_nano/320x320/FP32/yolox_nano_320x320.xml \
--output_saved_model \
--output_pb \
--output_no_quant_float32_tflite \
--weight_replacement_config debug/weight_replacement_config_yolox_nano.json
'''
import os
import sys
import argparse
import struct
import numpy as np
from pathlib import Path
import xml.etree.ElementTree as et
import logging
import warnings
from tensorflow.python.framework.ops import _run_using_default_session
from tensorflow.python.keras.backend import ndim
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
class Color:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
COLOR_DEFAULT = '\033[39m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
INVISIBLE = '\033[08m'
REVERCE = '\033[07m'
BG_BLACK = '\033[40m'
BG_RED = '\033[41m'
BG_GREEN = '\033[42m'
BG_YELLOW = '\033[43m'
BG_BLUE = '\033[44m'
BG_MAGENTA = '\033[45m'
BG_CYAN = '\033[46m'
BG_WHITE = '\033[47m'
BG_DEFAULT = '\033[49m'
RESET = '\033[0m'
def convert(model_path,
model_output_path,
output_saved_model,
output_h5,
output_weight_and_json,
output_pb,
output_no_quant_float32_tflite,
output_dynamic_range_quant_tflite,
output_weight_quant_tflite,
output_float16_quant_tflite,
output_integer_quant_tflite,
output_full_integer_quant_tflite,
output_integer_quant_type,
string_formulas_for_normalization,
calib_ds_type,
ds_name_for_tfds_for_calibration,
split_name_for_tfds_for_calibration,
download_dest_folder_path_for_the_calib_tfds,
tfds_download_flg,
npy_load_default_path,
load_dest_file_path_for_the_calib_npy,
output_tfjs,
output_tftrt_float32,
output_tftrt_float16,
tftrt_maximum_cached_engines,
output_coreml,
output_edgetpu,
edgetpu_compiler_timeout,
edgetpu_num_segments,
output_onnx,
onnx_opset,
use_onnx_optimization,
output_myriad,
vpu_number_of_shaves,
vpu_number_of_cmx_slices,
replace_swish_and_hardswish,
optimizing_hardswish_for_edgetpu,
replace_prelu_and_minmax,
restricted_resize_image_mode,
weight_replacement_config,
use_experimental_new_quantizer,
optimizing_barracuda,
layerids_of_the_terminating_output,
keep_input_tensor_in_nchw):
print(f'{Color.REVERCE}TensorFlow/Keras model building process starts{Color.RESET}', '=' * 38)
import subprocess
import tensorflow as tf
tf.get_logger().setLevel('INFO')
tf.autograph.set_verbosity(0)
tf.get_logger().setLevel(logging.ERROR)
import tensorflow_datasets as tfds
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, AveragePooling2D, Conv2DTranspose, PReLU, Lambda, LeakyReLU, Conv3D
from tensorflow.keras.initializers import Constant
from tensorflow.keras.activations import elu
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from tensorflow.python.framework.ops import EagerTensor
if output_coreml:
import coremltools as ct
import json
import pprint
import math
# for unpacking binary buffer
format_config = {
'FP32' : ['f', 4],
'FP16' : ['e', 2],
'I64' : ['q', 8],
'I32' : ['i', 4],
'I16' : ['h', 2],
'I8' : ['b', 1],
'U8' : ['B', 1],
'BOOL' : ['?', 1]
}
# vino: u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, bf16, boolean
# tf : uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64, bfloat16
# type conversion table
cast_type_ov_tf = {
'u8' : tf.uint8,
'u16' : tf.uint16,
'u32' : tf.uint32,
'u64' : tf.uint64,
'i8' : tf.int8,
'i16' : tf.int16,
'i32' : tf.int32,
'i64' : tf.int64,
'f16' : tf.float16,
'f32' : tf.float32,
'bf16': tf.bfloat16,
'boolean': tf.bool
}
# integer type table
int_type_tf = [
tf.uint8,
tf.uint16,
tf.uint32,
tf.uint64,
tf.int8,
tf.int16,
tf.int32,
tf.int64
]
# pad type conversion table
pad_type_ov_tf = {
'constant' : 'CONSTANT',
'reflect' : 'REFLECT',
'symmetric': 'SYMMETRIC',
'edge' : 'REFLECT'
}
# Read IR weight data
with open(model_path+'.bin', 'rb') as f:
binWeight = f.read()
# Parse IR XML file
tree = et.parse(model_path+'.xml')
root = tree.getroot()
edges = root.find('edges')
layers = root.find('layers')
tf_layers_dict = {}
tf_edges = {}
tf_inputs = []
tf_outputs = []
layer_id_port_dict = {}
def get_num_of_outputs_per_layer_id(tf_edges):
output_count_by_layer_id_tmp = {}
for key in tf_edges.keys():
key_tmp = key.split(':')[0]
output_count_by_layer_id_tmp.setdefault(key_tmp, {'count': 0, 'layer_id:port': []})
output_count_by_layer_id_tmp[key_tmp]['count'] += 1
output_count_by_layer_id_tmp[key_tmp]['layer_id:port'].append(key)
return output_count_by_layer_id_tmp
def get_bere_layer_type(before_layer):
t = type(tf_layers_dict[before_layer])
if t == np.ndarray:
# Const
return 'const'
else:
try:
return tf_layers_dict[before_layer.split(':')[0]].op.type
except:
# TopK
return 'other'
def get_tf_edges_from(tf_edges, layer_id, edge_index=-1):
if edge_index == -1:
# Add, Concat
layer_list = []
for edge_index in range(len(tf_edges[layer_id])):
before_layer_type = get_bere_layer_type(tf_edges[layer_id][edge_index])
if before_layer_type == 'Split':
layer_list.append(tf_edges[layer_id][edge_index])
elif before_layer_type == 'other':
layer_list.append(tf_edges[layer_id][edge_index])
else:
layer_list.append(tf_edges[layer_id][edge_index].split(':')[0])
return layer_list
else:
# Other
if layer_id in tf_edges:
before_layer_type = get_bere_layer_type(tf_edges[layer_id][edge_index])
else:
for key in tf_edges.keys():
if layer_id in key:
before_layer_type = get_bere_layer_type(tf_edges[key][edge_index])
layer_id = key
break
if before_layer_type == 'Split':
return tf_edges[layer_id][edge_index]
elif before_layer_type == 'other':
return tf_edges[layer_id][edge_index]
else:
return tf_edges[layer_id][edge_index].split(':')[0]
"""
format_version : Format version of weight_replacement_config.
layer_id : ID of the Const layer whose weight/constant parameter is to be swapped.
For example, specify "1123" for layer id="1123" for type="Const" in .xml.
<layer id="1123" name="Decoder/softmax/Reshape_1/Cast_123722_const657_const" type="Const" version="opset1">
<data element_type="i64" offset="7632604" shape="4" size="32"/>
<output>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
replace_mode : "direct" or "npy"
"direct": Specify the values of the Numpy matrix directly in the "values" attribute.
Ignores the values recorded in the .bin file and replaces them with the values specified in "values".
{
"layer_id": "1123",
"replace_mode": "direct",
"values": [
1,
2,
513,
513
]
}
"npy": Load a Numpy binary file with the matrix output by np.save('xyz', a).
The "values" attribute specifies the path to the Numpy binary file.
{
"layer_id": "1125",
"replace_mode": "npy",
"values": "weights/xyz.npy"
}
values : Specify the value or the path to the Numpy binary file to replace the weight/constant value recorded in .bin.
The way to specify is as described in the description of 'replace_mode'.
"""
# Elements for each version of weights_replacement_config
# key = config version
# value = Allowed elements for each version
weights_replacement_config_version_elements = {
1 : ['layer_id', 'replace_mode', 'values'],
2 : ['layer_id', 'type', 'replace_mode', 'values']
}
# Combinations of possible values for type key and replace_mode in weights_replacement_config.
# key = Type name
# value = List of replace_mode
weights_replacement_config_types = {
'Const': ['direct', 'npy'],
'Transpose': ['insert_before', 'insert_after'],
'Reshape': ['insert_before', 'insert_after'],
'Cast': ['insert_before', 'insert_after'],
'Concat': ['change_axis'],
'SoftMax': ['change_axis'],
'ShuffleChannels': ['change_axis'],
'StridedSlice': ['change_attributes'],
'MaxPool': ['change_padding_mode'],
'PReLU': ['change_shared_axes'],
'ReverseSequence': ['change_batch_axis', 'change_seq_axis'],
'Squeeze': ['insert_before', 'insert_after'],
'Unsqueeze': ['insert_before', 'insert_after'],
}
def parse_json(jsonfile_path: str):
"""Parsing weights_replacement_config
Args:
----------
jsonfile_path : str
Path to the weights_replacement_config file
Returns:
----------
format_version : int
Format version number of weights_replacement_config
layers : dict
Result of parsing weights_replacement_config into dict format
"""
j = json.load(open(jsonfile_path))
format_version = j['format_version']
layers = {}
for v in j['layers']:
# Elements check
for k in v.keys():
if not k in weights_replacement_config_version_elements[format_version]:
key_name1 = 'layer_id'
print(f'{Color.RED}ERROR:{Color.RESET} It contains a key that cannot be included in the config with format_version: {format_version}. layer_id: {v[key_name1]}, key: "{k}"')
print(f'{Color.RED}ERROR:{Color.RESET} List of keys to allow in format_version: {format_version}. {weights_replacement_config_version_elements[format_version]}')
sys.exit(-1)
for k in weights_replacement_config_version_elements[format_version]:
if not k in v.keys():
key_name1 = 'layer_id'
print(f'{Color.RED}ERROR:{Color.RESET} Missing elements that must be included in the config for format_version: {format_version}. layer_id: {v[key_name1]}, key: "{k}"')
print(f'{Color.RED}ERROR:{Color.RESET} List of elements that must be included in the config for format_version: {format_version}. {weights_replacement_config_version_elements[format_version]}')
sys.exit(-1)
# weights_replacement_config_types check (Only when format_version is 2 or higher)
if format_version >= 2:
# Type check
if not v['type'] in weights_replacement_config_types.keys():
key_name1 = 'layer_id'
key_name2 = 'type'
print(f'{Color.RED}ERROR:{Color.RESET} It contains a key that cannot be included in the config. layer_id: {v[key_name1]}, type: "{v[key_name2]}"')
print(f'{Color.RED}ERROR:{Color.RESET} List of keys to allow. {weights_replacement_config_types.keys()}')
sys.exit(-1)
# Replace Mode check
if not v['replace_mode'] in weights_replacement_config_types[v['type']]:
key_name1 = 'layer_id'
key_name2 = 'replace_mode'
key_name3 = 'type'
print(f'{Color.RED}ERROR:{Color.RESET} It contains a key that cannot be included in the config. layer_id: {v[key_name1]}, replace_mode: "{v[key_name2]}"')
print(f'{Color.RED}ERROR:{Color.RESET} List of keys to allow. {weights_replacement_config_types[v[key_name3]]}')
sys.exit(-1)
layers[v['layer_id']] = v
print(f'{Color.GREEN}weight_replacement_config format_version:{Color.RESET} {format_version}')
print(f'{Color.GREEN}Replace the value of Const for each layer_id with the value below.{Color.RESET}')
pprint.pprint(layers)
return format_version, layers
format_version = None
wr_config = None
if weight_replacement_config:
format_version, wr_config = parse_json(weight_replacement_config)
def extrapolation_of_layers(setting_up_layers_to_be_extrapolated: dict, input):
"""Processing of input operations based on weights_replacement_config settings
Args:
----------
setting_up_layers_to_be_extrapolated : dict
wr_config[layer_id]
{
"layer_id": "659",
"type": "Transpose",
"replace_mode": "insert_before",
"values": [0,2,1]
}
input : INPUT operation
INPUT layer to be input to TF operations
Returns:
----------
Processed input operations
"""
tf_layer = None
layer_type = setting_up_layers_to_be_extrapolated['type']
param = setting_up_layers_to_be_extrapolated['values']
if layer_type == 'Transpose':
tf_layer = tf.transpose(
input,
perm=param
)
elif layer_type == 'Reshape':
tf_layer = tf.reshape(
input,
shape=param
)
elif layer_type == 'Cast':
tf_layer = tf.cast(
input,
dtype=cast_type_ov_tf[param]
)
elif layer_type == 'Squeeze':
tf_layer = tf.squeeze(
input,
axis=param
)
elif layer_type == 'Unsqueeze':
tf_layer = tf.expand_dims(
input,
axis=param
)
return tf_layer
print(f'{Color.REVERCE}Layer structure{Color.RESET}', '=' * 69)
def layer_structure_print(info: dict) -> None:
for key, value in info.items():
print(f'{Color.GREEN}{key}{Color.RESET}: {value}')
print('=' * 84)
# edges
added_key_list = []
concat_port_list = {}
for edge in edges:
to_layer = edge.attrib['to-layer']
to_layer_port = edge.attrib['to-port']
from_layer = edge.attrib['from-layer']
from_layer_port = edge.attrib['from-port']
for layer in layers:
if layer.attrib['id'] == to_layer:
output_layer_ports = layer.find('output')
if layer.attrib['type'] != 'Result' and len(output_layer_ports) >= 2:
for port in output_layer_ports:
tf_edges.setdefault('{}:{}'.format(to_layer, port.attrib['id']), []).append(from_layer)
added_key_list.append(to_layer)
else:
tf_edges.setdefault(to_layer, [])
if layer.attrib['type'] == 'Concat' or \
layer.attrib['type'] == 'Gather' or \
layer.attrib['type'] == 'GatherND' or \
layer.attrib['type'] == 'GatherElements' or \
layer.attrib['type'] == 'ScatterElementsUpdate' or \
layer.attrib['type'] == 'ScatterNDUpdate' or \
layer.attrib['type'] == 'Reshape' or \
layer.attrib['type'] == 'ConvertLike' or \
layer.attrib['type'] == 'Subtract' or \
layer.attrib['type'] == 'Divide' or \
layer.attrib['type'] == 'FloorMod' or \
layer.attrib['type'] == 'Power' or \
layer.attrib['type'] == 'MatMul' or \
layer.attrib['type'] == 'Greater' or \
layer.attrib['type'] == 'GreaterEqual' or \
layer.attrib['type'] == 'Less' or \
layer.attrib['type'] == 'LessEqual' or \
layer.attrib['type'] == 'SquaredDifference' or \
layer.attrib['type'] == 'PriorBox' or \
layer.attrib['type'] == 'PriorBoxClustered' or \
layer.attrib['type'] == 'StridedSlice' or \
layer.attrib['type'] == 'Select' or \
layer.attrib['type'] == 'VariadicSplit' or \
layer.attrib['type'] == 'ReverseSequence' or \
layer.attrib['type'] == 'Range':
concat_port_list.setdefault(to_layer, []).append(f'{from_layer}:{to_layer_port}')
for layer in layers:
if layer.attrib['id'] == from_layer:
output_layer_ports = layer.find('output')
if len(output_layer_ports) >= 2:
flg = 'not_found'
for key in tf_edges.keys():
if to_layer in key and from_layer in tf_edges[key] and '{}:{}'.format(from_layer, from_layer_port) not in tf_edges[key]:
tf_edges[key].append('{}:{}'.format(from_layer, from_layer_port))
flg = 'found'
try:
tf_edges[key].remove(from_layer)
except:
pass
if flg == 'not_found':
tf_edges.setdefault(to_layer, []).append('{}:{}'.format(from_layer, from_layer_port))
if to_layer not in added_key_list:
added_key_list.append(to_layer)
else:
if to_layer not in added_key_list:
tf_edges.setdefault(to_layer, []).append(from_layer)
added_key_list.append(to_layer)
else:
flg = 'not_found'
for key in tf_edges.keys():
if to_layer in key and from_layer in tf_edges[key]:
flg = 'found'
break
if flg == 'not_found':
tf_edges.setdefault(to_layer, []).append(from_layer)
break
# The following loop sorts tf_edges in ascending order by port
for to_layer, from_layer_ports in concat_port_list.items():
temp_sorted_tf_edge = []
# from_layer_ports = [from_layer_id:port, from_layer_id:port, from_layer_id:port, ...]
ports = [p.split(':')[1] for p in from_layer_ports]
for idx, port in enumerate(ports):
temp_sorted_tf_edge.append(tf_edges[to_layer][ports.index(str(idx))])
tf_edges[to_layer] = temp_sorted_tf_edge
del added_key_list
del concat_port_list
layer_id_port_dict = get_num_of_outputs_per_layer_id(tf_edges)
# layers
for idx, layer in enumerate(layers):
layer_id = layer.attrib['id']
layer_name = layer.attrib['name'].replace('.', '_').replace('/', '_')
data = layer.find('data')
try:
outputs = None
layer_id_values = None
layer_id_indices = None
### Parameter
if layer.attrib['type'] == 'Parameter':
if not data is None and 'shape' in data.attrib:
shape_str = data.attrib['shape'].split(',')
shape = [int(s) for s in shape_str]
if len(shape) == 4:
if not keep_input_tensor_in_nchw:
tf_layers_dict[layer_id] = Input(shape=(shape[2], shape[3], shape[1]), batch_size=shape[0], name=layer_name)
else:
nchw = Input(shape=(shape[1], shape[2], shape[3]), batch_size=shape[0], name=layer_name)
tf_layers_dict[layer_id] = tf.transpose(nchw, perm=[0,2,3,1])
else:
if keep_input_tensor_in_nchw:
print(f'{Color.RED}ERROR:{Color.RESET} The keep_input_tensor_in_nchw parameter only supports 4D input. layer_id: {layer_id} input_shape: {shape}')
sys.exit(-1)
tf_layers_dict[layer_id] = Input(shape=[inp for inp in shape[1:]], batch_size=shape[0], name=layer_name)
if keep_input_tensor_in_nchw:
tf_inputs.append(nchw)
else:
tf_inputs.append(tf_layers_dict[layer_id])
layer_structure_print(
{
'layer_type': 'Input',
'layer_id': layer_id,
'tf_layers_dict': tf_layers_dict[layer_id]
}
)
if wr_config and layer_id in wr_config and format_version >= 2:
print(f'{Color.RED}ERROR:{Color.RESET} Extrapolation of operations to "Parameter" is not supported. layer_id: {layer_id}')
sys.exit(-1)
### Const
elif layer.attrib['type'] == 'Const':
if not data is None:
if 'offset' in data.attrib and 'size' in data.attrib:
offset = int(data.attrib['offset'])
size = int(data.attrib['size'])
shape_str = '1' if data.attrib['shape'] == '' else data.attrib['shape'].split(',')
shape = [int(s) for s in shape_str]
blobBin = binWeight[offset:offset+size]
prec = layer.find('output').find('port').attrib['precision']
formatstring = '<' + format_config[prec][0] * (len(blobBin)//format_config[prec][1])
decodedwgt = np.array(list(struct.unpack(formatstring, blobBin))).reshape(shape)
if not wr_config or layer_id not in wr_config:
if type(decodedwgt) == np.ndarray and decodedwgt.dtype == np.float64:
tf_layers_dict[layer_id] = decodedwgt.astype(np.float32)
else:
tf_layers_dict[layer_id] = decodedwgt
else:
if layer_id in wr_config and format_version == 1:
if wr_config[layer_id]['replace_mode'] == 'direct':
try:
tf_layers_dict[layer_id] = np.array(wr_config[layer_id]['values'])
except:
tf_layers_dict[layer_id] = wr_config[layer_id]['values']
elif wr_config[layer_id]['replace_mode'] == 'npy':
tf_layers_dict[layer_id] = np.load(wr_config[layer_id]['values'])
elif layer_id in wr_config and format_version >= 2 and wr_config[layer_id]['type'] == 'Const':
if wr_config[layer_id]['replace_mode'] == 'direct':
try:
tf_layers_dict[layer_id] = np.array(wr_config[layer_id]['values'])
except:
tf_layers_dict[layer_id] = wr_config[layer_id]['values']
elif wr_config[layer_id]['replace_mode'] == 'npy':
tf_layers_dict[layer_id] = np.load(wr_config[layer_id]['values'])
else:
if type(decodedwgt) == np.ndarray and decodedwgt.dtype == np.float64:
tf_layers_dict[layer_id] = decodedwgt.astype(np.float32)
else:
tf_layers_dict[layer_id] = decodedwgt
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
print(f'{Color.RED}ERROR:{Color.RESET} Extrapolation of operations to {layer.attrib["type"]} {wr_config[layer_id]["replace_mode"]} is not supported. layer_id: {layer_id}')
sys.exit(-1)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[layer_id]
)
else:
pass
layer_structure_print(
{
'layer_type': layer.attrib['type'],
'layer_id': layer_id,
'tf_layers_dict': tf_layers_dict[layer_id].shape
}
)
### Convolution
elif layer.attrib['type'] == 'Convolution':
# port0 = [int(sdim.text) for sdim in layer.find('input')[0]]
port1 = [int(sdim.text) for sdim in layer.find('input')[1]]
filters = int(port1[0])
dilations = [int(s) for s in data.attrib['dilations'].split(',')]
strides = [int(s) for s in data.attrib['strides'].split(',')]
pads_begin = 0
pads_end = 0
if not data is None and 'pads_begin' in data.attrib:
pads_begin = sum([int(s) for s in data.attrib['pads_begin'].split(',')])
if not data is None and 'pads_end' in data.attrib:
pads_end = sum([int(s) for s in data.attrib['pads_end'].split(',')])
padding = ''
if (pads_begin + pads_end) == 0:
if 'auto_pad' in data.attrib:
if data.attrib['auto_pad'] == 'same_upper' or data.attrib['auto_pad'] == 'same_lower':
padding = 'same'
else:
padding = 'valid'
else:
padding = 'valid'
else:
padding = 'same'
temp = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
if len(strides) == 2:
# Conv2D
kernel_size = [int(port1[2]), int(port1[3])]
orig = None
if pads_begin > 0:
padding = 'valid'
# begin 0 = top
# begin 1 = left
# end 0 = bottom
# end 1 = right
begin = [int(data.attrib['pads_begin'].split(',')[0]), int(data.attrib['pads_end'].split(',')[0])]
end = [int(data.attrib['pads_begin'].split(',')[1]), int(data.attrib['pads_end'].split(',')[1])]
orig = tf.keras.layers.ZeroPadding2D([begin, end])(temp)
else:
if temp.shape[0] == 1 and temp.shape[2] == 1 and temp.shape[3] == 1:
orig = tf.transpose(temp, perm=(0,2,3,1))
else:
orig = temp
try:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
orig
)
tf_layers_dict[layer_id] = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose(2,3,1,0))
)(inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose(2,3,1,0))
)(orig)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose(2,3,1,0))
)(orig)
except:
try:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
orig
)
tf_layers_dict[layer_id] = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].numpy().transpose(2,3,1,0))
)(inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].numpy().transpose(2,3,1,0))
)(orig)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].numpy().transpose(2,3,1,0))
)(orig)
except:
# Weights from OP that are not fixed values
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
orig
)
tf_layers_dict[layer_id] = tf.nn.conv2d(
input=inp,
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)],
strides=strides,
padding=padding.upper(),
dilations=dilations
)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.nn.conv2d(
input=orig,
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)],
strides=strides,
padding=padding.upper(),
dilations=dilations
)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = tf.nn.conv2d(
input=orig,
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)],
strides=strides,
padding=padding.upper(),
dilations=dilations
)
elif len(strides) == 3:
# Conv3D - WIP padding same only
# https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv3D
"""
openvino = [C_OUT, C_IN, Z, Y, X] = [8,1,3,3,3] [16,8,3,3,3]
tf = [ Z, Y, X, C_IN, C_OUT] = [3,3,3,1,8] [3,3,3,8,16]
"""
kernel_size = [int(port1[2]), int(port1[3]), int(port1[4])]
try:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose((2,3,4,1,0)))
)(inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose((2,3,4,1,0)))
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
dilation_rate=dilations,
use_bias=False,
kernel_initializer=Constant(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose((2,3,4,1,0)))
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
# Weights from OP that are not fixed values
# https://www.tensorflow.org/api_docs/python/tf/nn/conv3d
strides = [1, strides[0], strides[1], strides[2], 1]
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = tf.nn.conv3d(
input=inp,
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)],
strides=strides,
padding="SAME",
dilations=[1, dilations[0], dilations[1], dilations[2], 1]
)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.nn.conv3d(
input=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)],
strides=strides,
padding="SAME",
dilations=[1, dilations[0], dilations[1], dilations[2], 1]
)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = tf.nn.conv3d(
input=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)],
strides=strides,
padding="SAME",
dilations=[1, dilations[0], dilations[1], dilations[2], 1]
)
elif len(strides) == 1:
# Conv1D
"""
VINO:[N, C, W]
TF :[N, W, C]
VINO = input:[1,1024,16] filter:[512,1024,1]
TF = input:[1,16,1024] filter:[1,1024,512]
"""
kernel_size = [int(port1[2])]
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = tf.nn.conv1d(
input=inp,
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose(2,1,0),
stride=strides,
padding=padding.upper(),
dilations=dilations
)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.nn.conv1d(
input=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose(2,1,0),
stride=strides,
padding=padding.upper(),
dilations=dilations
)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = tf.nn.conv1d(
input=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)],
filters=tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].transpose(2,1,0),
stride=strides,
padding=padding.upper(),
dilations=dilations
)
### Add
elif layer.attrib['type'] == 'Add':
# 'Fused_Add_' == BiasAdd
if len(tf_edges[layer_id]) == 2 and type(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]) == np.ndarray:
try:
# Biasadd or Add
edge_id0 = get_tf_edges_from(tf_edges, layer_id, 0)
edge_id1 = get_tf_edges_from(tf_edges, layer_id, 1)
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
print(f'{Color.RED}ERROR:{Color.RESET} Extrapolation of operations to {layer.attrib["type"]} {wr_config[layer_id]["replace_mode"]} is not supported. layer_id: {layer_id}')
sys.exit(-1)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
if tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].shape[-1] == tf_layers_dict[edge_id1].flatten().shape[0]:
inp = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1].flatten())
else:
inp = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
if tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].shape[-1] == tf_layers_dict[edge_id1].flatten().shape[0]:
tf_layers_dict[layer_id] = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1].flatten())
else:
tf_layers_dict[layer_id] = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1])
except:
# Add
edge_id0 = get_tf_edges_from(tf_edges, layer_id, 0)
edge_id1 = get_tf_edges_from(tf_edges, layer_id, 1)
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
print(f'{Color.RED}ERROR:{Color.RESET} Extrapolation of operations to {layer.attrib["type"]} {wr_config[layer_id]["replace_mode"]} is not supported. layer_id: {layer_id}')
sys.exit(-1)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
try:
inp = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1])
except:
inp = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1].transpose(0,2,3,1))
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
try:
tf_layers_dict[layer_id] = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1])
except:
tf_layers_dict[layer_id] = tf.math.add(tf_layers_dict[edge_id0], tf_layers_dict[edge_id1].transpose(0,2,3,1))
else:
# Add
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
print(f'{Color.RED}ERROR:{Color.RESET} Extrapolation of operations to {layer.attrib["type"]} {wr_config[layer_id]["replace_mode"]} is not supported. layer_id: {layer_id}')
sys.exit(-1)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
if len(get_tf_edges_from(tf_edges, layer_id)) == 2:
try:
tmp_layers = [tf_layers_dict[from_layer_id].transpose(0,2,3,1).astype(np.float32) if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
inp = tf.math.add(tmp_layers[0], tmp_layers[1])
except:
try:
tmp_layers = [tf_layers_dict[from_layer_id].transpose(0,2,3,1) if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
inp = tf.math.add(tmp_layers[0], tmp_layers[1])
except:
tmp_layers = [tf_layers_dict[from_layer_id] if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
inp = tf.math.add(tmp_layers[0], tmp_layers[1])
else:
inp = tf.math.add_n(
[tf_layers_dict[from_layer_id].transpose(0,2,3,1).astype(np.float32) if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
)
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
if len(get_tf_edges_from(tf_edges, layer_id)) == 2:
try:
tmp_layers = [tf_layers_dict[from_layer_id].transpose(0,2,3,1).astype(np.float32) if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
tf_layers_dict[layer_id] = tf.math.add(tmp_layers[0], tmp_layers[1])
except:
try:
tmp_layers = [tf_layers_dict[from_layer_id].transpose(0,2,3,1) if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
tf_layers_dict[layer_id] = tf.math.add(tmp_layers[0], tmp_layers[1])
except:
tmp_layers = [tf_layers_dict[from_layer_id] if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
tf_layers_dict[layer_id] = tf.math.add(tmp_layers[0], tmp_layers[1])
else:
tf_layers_dict[layer_id] = tf.math.add_n(
[tf_layers_dict[from_layer_id].transpose(0,2,3,1).astype(np.float32) if type(tf_layers_dict[from_layer_id]) == np.ndarray else tf_layers_dict[from_layer_id] for from_layer_id in get_tf_edges_from(tf_edges, layer_id)]
)
### ReLU
elif layer.attrib['type'] == 'ReLU':
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = tf.nn.relu(inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.nn.relu(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = tf.nn.relu(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
### PReLU
elif layer.attrib['type'] == 'PReLU':
input_len = len(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].shape)
alpha_len = len(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)].shape)
shared_axes = []
if alpha_len < 4:
if input_len == 4:
shared_axes = [1, 2]
elif input_len == 3:
shared_axes = [1]
else:
shared_axes = None
else:
shared_axes = None
temp_alpha = tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 1)]
if alpha_len > 1 and temp_alpha.size == 1:
temp_alpha = np.squeeze(temp_alpha)
alpha_len = 1
if alpha_len == 4 and temp_alpha.shape[0] == 1 and temp_alpha.shape[2] == 1 and temp_alpha.shape[3] == 1:
shared_axes = [1, 2]
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['type'] == 'PReLU' and wr_config[layer_id]['replace_mode'] == 'change_shared_axes':
shared_axes = wr_config[layer_id]['values']
if alpha_len == 4:
if replace_prelu_and_minmax:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
try:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, inp) + \
tf.minimum(0.0, temp_alpha.transpose(0,2,3,1) * inp)
except:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, inp) + \
tf.minimum(0.0, tf.transpose(temp_alpha, perm=[0,2,3,1]) * inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
try:
inp = tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha.transpose(0,2,3,1) * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
inp = tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, tf.transpose(temp_alpha, perm=[0,2,3,1]) * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
try:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha.transpose(0,2,3,1) * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, tf.transpose(temp_alpha, perm=[0,2,3,1]) * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
try:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha.transpose(0,2,3,1) * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, tf.transpose(temp_alpha, perm=[0,2,3,1]) * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
try:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha.transpose(0,2,3,1)),
shared_axes=shared_axes
)(inp)
except:
try:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(tf.transpose(temp_alpha, perm=[0,2,3,1])),
shared_axes=shared_axes
)(inp)
except:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
try:
inp = PReLU(
alpha_initializer=Constant(temp_alpha.transpose(0,2,3,1)),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
try:
inp = PReLU(
alpha_initializer=Constant(tf.transpose(temp_alpha, perm=[0,2,3,1])),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
inp = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
try:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha.transpose(0,2,3,1)),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
try:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(tf.transpose(temp_alpha, perm=[0,2,3,1])),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
try:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha.transpose(0,2,3,1)),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
try:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(tf.transpose(temp_alpha, perm=[0,2,3,1])),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
elif alpha_len >= 2:
if replace_prelu_and_minmax:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = \
tf.maximum(0.0, inp) + \
tf.minimum(0.0, temp_alpha * inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
try:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
# alpha_len == 1 (LeakyReLU)
if replace_prelu_and_minmax:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = \
tf.maximum(0.0, inp) + \
tf.minimum(0.0, temp_alpha * inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
elif output_edgetpu: # LeakyReLU -> Max/Min
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
tf_layers_dict[layer_id] = tf.maximum(0.0, inp) + tf.minimum(0.0, inp * temp_alpha)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
inp = tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
tf_layers_dict[layer_id] = \
tf.maximum(0.0, tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]) + \
tf.minimum(0.0, temp_alpha * tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
if wr_config and layer_id in wr_config and format_version >= 2:
if wr_config[layer_id]['replace_mode'] == 'insert_before':
inp = extrapolation_of_layers(
wr_config[layer_id],
tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)]
)
try:
tf_layers_dict[layer_id] = LeakyReLU(
alpha=temp_alpha
)(inp)
except:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(inp)
elif wr_config[layer_id]['replace_mode'] == 'insert_after':
try:
inp = LeakyReLU(
alpha=temp_alpha
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
inp = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
tf_layers_dict[layer_id] = extrapolation_of_layers(
wr_config[layer_id],
inp
)
else:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
else:
try:
tf_layers_dict[layer_id] = LeakyReLU(
alpha=temp_alpha
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
except:
tf_layers_dict[layer_id] = PReLU(
alpha_initializer=Constant(temp_alpha),
shared_axes=shared_axes
)(tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)])
### Clamp
elif layer.attrib['type'] == 'Clamp':
cmin = None
cmax = None
if tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].dtype == np.int64:
cmin = np.asarray(data.attrib['min']).astype(np.int64)
cmax = np.asarray(data.attrib['max']).astype(np.int64)
elif tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].dtype == np.int32:
cmin = np.asarray(data.attrib['min']).astype(np.int32)
cmax = np.asarray(data.attrib['max']).astype(np.int32)
elif tf_layers_dict[get_tf_edges_from(tf_edges, layer_id, 0)].dtype == np.float32:
cmin = np.asarray(data.attrib['min']).astype(np.float32)
                cmax = np.asarray(data.attrib['max']).astype(np.float32)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import path
from numpy import linspace
class foreground():
    # Holds coordinates, volumes, numbers of points per axis, dx/dy/dh spacings, and the
    # right/left cell boundaries in each direction
    def __init__(self, coor, vols, xyz_numpts_vec, dx_vec, dy_vec, dh_vec, xr, xl, yr, yl, hr, hl):
        self.coor = coor
        self.vols = vols
        self.xyz_numpts_vec = xyz_numpts_vec
self.dx_vec = dx_vec
self.dy_vec = dy_vec
self.dh_vec = dh_vec
self.xr = xr
self.xl = xl
self.yr = yr
self.yl = yl
self.hr = hr
self.hl = hl
def generate_unif_particles(origin, xyzmax_vec, xyz_numpts_vec):
# Generate areas and positions from input origin, 3 orthogonal max displacements in x, y and z as [xmax, ymax, zmax], and a vector of the number of points in x, y and z to generate as np.array([numptsx, numptsy, numptsz])
xyz_totalnumpts_vec = xyz_numpts_vec
node_num=xyz_totalnumpts_vec[0]*xyz_totalnumpts_vec[1]*xyz_totalnumpts_vec[2]
x=np.zeros(xyz_totalnumpts_vec[0])
y=np.zeros(xyz_totalnumpts_vec[1])
h=np.zeros(xyz_totalnumpts_vec[2])
dx_vec=np.zeros(node_num)
dy_vec=np.zeros(node_num)
dh_vec=np.zeros(node_num)
xr_vec=np.zeros(node_num)
xl_vec=np.zeros(node_num)
yr_vec=np.zeros(node_num)
yl_vec=np.zeros(node_num)
hr_vec=np.zeros(node_num)
hl_vec=np.zeros(node_num)
coor=np.zeros((int(node_num),4))
print('Generating Uniform Particles...')
print("%s Nodes" %node_num)
vol=np.zeros(node_num)
dx=(xyzmax_vec[0]-origin[0])/(xyz_numpts_vec[0])
dy=(xyzmax_vec[1]-origin[1])/(xyz_numpts_vec[1])
if xyz_numpts_vec[2]>1: dh=(np.double(xyzmax_vec[2])-origin[2])/(np.double(xyz_numpts_vec[2]))
if xyz_numpts_vec[2]==1: dh=0
node=0
for i in range(xyz_totalnumpts_vec[2]):
if i>0: h[i]=h[i-1]+dh
if i==0: h[i]=origin[2]+(0.5)*dh
hr=h[i]+dh/2.0
hl=h[i]-dh/2.0
for j in range(xyz_totalnumpts_vec[0]):
if j>0: x[j]=x[j-1]+dx
if j==0: x[j]=origin[0]+(0.5)*dx
xr=x[j]+dx/2.0
xl=x[j]-dx/2.0
for k in range(xyz_totalnumpts_vec[1]):
if k>0: y[k]=y[k-1]+dy
if k==0: y[k]=origin[1]+(0.5)*dy
yr=y[k]+dy/2.0
yl=y[k]-dy/2.0
vol[node]=(xr-xl)*(yr-yl)
if hr!=hl: vol[node]=vol[node]*(hr-hl)
coor[node,:]=[node+1,x[j],y[k],h[i]]
xr_vec[node]=xr
xl_vec[node]=xl
yr_vec[node]=yr
yl_vec[node]=yl
hr_vec[node]=hr
hl_vec[node]=hl
dx_vec[node]=xr-xl
dy_vec[node]=yr-yl
dh_vec[node]=hr-hl
node=node+1
coor=np.vstack(([node,0,0,0], coor))
G=foreground(coor, vol, xyz_numpts_vec, dx_vec, dy_vec, dh_vec, xr_vec, xl_vec, yr_vec, yl_vec, hr_vec, hl_vec)
print("Complete")
return(G)
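# Usage sketch (hypothetical values): a 20x20x1 uniform grid over a unit square, then a scatter plot.
# G = generate_unif_particles([0.0, 0.0, 0.0], [1.0, 1.0, 0.0], np.array([20, 20, 1]))
# vis_particles2d(G)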
def vis_particles2d(G):
# Generate Scatter plot of saved point cloud data
plt.scatter(G.coor[1:,1], G.coor[1:,2])
plt.show()
def save_geometry(G):
# outputs Geometry.dat including input coordinates and areas of the PD nodes
print("Saving input files...")
coor=G.coor
vol=G.vols
dx_vec=np.transpose(np.array([G.dx_vec]))
dy_vec=np.transpose(np.array([G.dy_vec]))
dz_vec=np.transpose(np.array([G.dh_vec]))
nodes=coor.shape[0]-1
with open("Geometry.dat", "w") as f:
f.write("%6d \t\t\t x \t\t\t\t\t y\t\t\t\t\t\t z \t\t\t\t\tarea \n" % (nodes) )
data=np.hstack((coor[1:nodes+1,:], np.transpose(np.array([vol[:nodes]]))))
with open("Geometry.dat", "ab") as f:
np.savetxt(f, np.vstack((data)), fmt='%6i %e %e %e %e')
print("Save Complete, %d nodes"%nodes)
def subt_rect_dom_fg(G, p1, p2, p3, p4):
#Subtracts rectangle primitive domain from particles domain given 4 corner point vectors as nparray p1 p2 p3 p4
xyz=G.coor[1:,1:-1]
temp=G.coor[1:,:]
p = path.Path([p1,p2,p3,p4])
    bools=np.invert(p.contains_points(xyz))
nodenum=np.sum(bools)
G.coor=temp[bools]
for i in range(G.coor.shape[0]):
G.coor[i,0]=i+1
G.coor=np.vstack((np.array([nodenum,0,0,0]),G.coor))
G.vols=G.vols[bools]
G.dx_vec = G.dx_vec[bools]
G.dy_vec = G.dy_vec[bools]
G.dh_vec = G.dh_vec[bools]
G.xr = G.xr[bools]
G.xl = G.xl[bools]
G.yr = G.yr[bools]
G.yl = G.yl[bools]
G.hr = G.hr[bools]
G.hl = G.hl[bools]
return(G)
def in_cube(xyz, xmin, xmax, ymin, ymax, zmin, zmax):
out = np.logical_and(np.logical_and(np.logical_and(xyz[:, 0]>=xmin, xyz[:, 0]<=xmax), np.logical_and(xyz[:, 1]>=ymin, xyz[:, 1]<=ymax)), np.logical_and(xyz[:, 2]>=zmin, xyz[:, 2]<=zmax))
return(out)
def subt_cubic_dom_fg(G, xmin, xmax, ymin, ymax, zmin, zmax):
    #Subtracts a cubic (box) primitive domain from the particle domain given axis-aligned bounds xmin, xmax, ymin, ymax, zmin, zmax
xyz=G.coor[1:,1:]
temp=G.coor[1:,:]
bools=in_cube(xyz, xmin, xmax, ymin, ymax, zmin, zmax)
bools=np.invert(bools)
nodenum=np.sum(bools)
G.coor=temp[bools]
for i in range(G.coor.shape[0]):
G.coor[i,0]=i+1
G.coor=np.vstack((np.array([nodenum,0,0,0]),G.coor))
G.vols=G.vols[bools]
G.dx_vec = G.dx_vec[bools]
G.dy_vec = G.dy_vec[bools]
G.dh_vec = G.dh_vec[bools]
G.xr = G.xr[bools]
G.xl = G.xl[bools]
G.yr = G.yr[bools]
G.yl = G.yl[bools]
G.hr = G.hr[bools]
G.hl = G.hl[bools]
return(G)
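# Usage sketch (hypothetical bounds): carve an axis-aligned box out of the grid generated above.
# G = subt_cubic_dom_fg(G, 0.25, 0.75, 0.25, 0.75, -1.0, 1.0)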
def in_circle(xyz, center, radius):
    # Returns True for points on or outside the 2d circle defined by center and radius
    # (used as the keep-mask when subtracting the circular region from the particle set)
out = np.sqrt(np.sum(np.multiply(np.array(xyz)-center,np.array(xyz)-center), axis=1))>=radius
return(out)
def subt_circular_domain(G, center, radius):
    # Subtracts a circular domain from the particles given center and radius
xyz=G.coor[1:,1:]
temp=G.coor[1:,:]
bools=in_circle(xyz, center, radius)
nodenum=np.sum(bools)
G.coor=temp[bools]
for i in range(G.coor.shape[0]):
G.coor[i,0]=i+1
    G.coor=np.vstack((np.array([nodenum,0,0,0]),G.coor))
    G.vols=G.vols[bools]
    G.dx_vec = G.dx_vec[bools]
    G.dy_vec = G.dy_vec[bools]
    G.dh_vec = G.dh_vec[bools]
    G.xr = G.xr[bools]
    G.xl = G.xl[bools]
    G.yr = G.yr[bools]
    G.yl = G.yl[bools]
    G.hr = G.hr[bools]
    G.hl = G.hl[bools]
    return(G)
import numpy as np
from cartesian.cgp import Primitive
from scipy.stats import skew, kurtosis
#################################################################
# Here we define pool of functions available for CGP experiments.
#################################################################
############################## Utils functions ##############################
def float2index(vector, y):
"""
Transform float y representation to (integer) index to vector.
:param vector: Strictly one-dim np.array.
:param y: Any np.array or scalar.
:return: Index to vector.
"""
l = vector.shape[0]
index_f = np.mean(np.abs((y + 1) / 2))
return int(np.nan_to_num(np.min((np.max(((l-1) * index_f, 0)), l-1))))
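# e.g. float2index(np.zeros(10), 0.0) == 4: y == 0 maps to the middle of a length-10 vector.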
def common_submatrices(x, y):
if len(x.shape) == 1:
x = np.expand_dims(x, 0)
if len(y.shape) == 1:
y = np.expand_dims(y, 0)
(x1, x2), (y1, y2) = x.shape, y.shape
n1, n2 = min(x1, y1), min(x2, y2)
return x[0:n1, 0:n2], y[0:n1, 0:n2]
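# e.g. common_submatrices(np.ones((2, 5)), np.ones((3, 4))) returns two (2, 4) views,
# trimming both arguments to their common leading rows and columns.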
def scaled_array(array):
array[~np.isfinite(array)] = 0.0
array[array < -1.0] = -1.0
array[array > 1.0] = 1.0
return array
def scaled_scalar(number):
if not np.isfinite(number):
return 0.0
else:
return np.min((np.max((number, -1.0)), 1.0))
############################## Mathematical functions ##############################
def cgp_inner_ypow(x, y):
if (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)):
x, y = common_submatrices(x, y)
return np.power(np.abs(x), np.abs(y))
def cgp_inner_sqrtxy(x, y):
if (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)):
x, y = common_submatrices(x, y)
return np.sqrt(x**2 + y**2) / np.sqrt(2)
cgp_add = Primitive("add", lambda x, y: sum(*common_submatrices(x, y)) / 2.0 if (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)) else (x+y)/2.0, 2)
cgp_aminus = Primitive("aminus", lambda x, y: np.abs(sum(*common_submatrices(x, -y))) / 2.0 if (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)) else np.abs(x-y)/2.0, 2)
cgp_mult = Primitive("mult", lambda x, y: np.multiply(*common_submatrices(x, y)) if (isinstance(x, np.ndarray) and isinstance(y, np.ndarray)) else x*y, 2)
cgp_inv = Primitive("inverse", lambda x: scaled_array(np.divide(1, x)) if isinstance(x, np.ndarray) else scaled_scalar(np.divide(1, x)), 1)
cgp_abs = Primitive("abs", lambda x: np.abs(x), 1)
cgp_sqrt = Primitive("sqrt", lambda x: np.sqrt(np.abs(x)), 1)
cgp_ypow = Primitive("ypow", cgp_inner_ypow, 2)
cgp_expx = Primitive("expx", lambda x: (np.exp(x) - 1) / (np.exp(1) - 1), 1)
cgp_sinx = Primitive("sinx", lambda x: np.sin(x), 1)
import datetime
import locale
import os
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from netCDF4 import num2date
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime.datetime(2017, 2, 24)
__modified__ = datetime.datetime(2019, 1, 21)
__version__ = "1.0"
__status__ = "Development"
matplotlib.rcParams.update({'font.size': 14})
class FormatScalarFormatter(matplotlib.ticker.ScalarFormatter):
def __init__(self, fformat="%1.1f", offset=True, mathText=True):
self.fformat = fformat
matplotlib.ticker.ScalarFormatter.__init__(self, useOffset=offset,
useMathText=mathText)
def _set_format(self):
self.format = self.fformat
if self._useMathText:
self.format = r'$\mathdefault{%s}$' % self.format
class StationPlot:
# Used for contour plots
def define_variable_specifics(self, varname):
return {"temp": {"vmin": 4, "vmax": 17, "title": "{}: Temperatur ($^o$C)".format(self.name)},
"salt": {"vmin": 25, "vmax": 35, "title": "{}: Saltholdighet".format(self.name)},
"ox": {"vmin": 0, "vmax": 12, "title": "{}: Oksygenkons.(mlO$_2$/L)".format(self.name)},
"oxsat": {"vmin": 0, "vmax": 120, "title": "{}: Oksygenmetning ($\%$)".format(self.name)},
"ftu": {"vmin": 0, "vmax": 5, "title": "{}: Fluorescence".format(self.name)}}[varname]
# Used for variability over time at fixed depths
def define_variable_specifics_3m(self, varname):
return {"temp": {"vmin": 2, "vmax": 19, "ylabel": "Temperatur ($^o$C)", "title": "{}".format(self.name)},
"salt": {"vmin": 25, "vmax": 35.3, "ylabel": "Saltholdighet", "title": "{}".format(self.name)},
"ox": {"vmin": 2, "vmax": 12, "ylabel": "Oksygenkons.(mlO$_2$/L)", "title": "{}".format(self.name)},
"oxsat": {"vmin": 0, "vmax": 120, "ylabel": "Oksygenmetning ($\%$)", "title": "{}".format(self.name)},
"ftu": {"vmin": 0, "vmax": 5, "ylabel": "Fluorescence", "title": "{}".format(self.name)}}[varname]
# Used for variability over time at fixed depths
def define_variable_specifics_100_200m(self, varname):
return {"temp": {"vmin": 6, "vmax": 11, "ylabel": "Temperatur ($^o$C)", "title": "{}".format(self.name)},
"salt": {"vmin": 34, "vmax": 35.3, "ylabel": "Saltholdighet", "title": "{}".format(self.name)},
"ox": {"vmin": 2, "vmax": 8, "ylabel": "Oksygenkons.(mlO$_2$/L)", "title": "{}".format(self.name)},
"oxsat": {"vmin": 0, "vmax": 120, "ylabel": "Oksygenmetning ($\%$)", "title": "{}".format(self.name)},
"ftu": {"vmin": 0, "vmax": 5, "ylabel": "Fluorescence", "title": "{}".format(self.name)}}[varname]
def define_array_for_variable(self, varname):
return {"temp": self.sectionTE,
"salt": self.sectionSA,
"ox": self.sectionOX,
"ftu": self.sectionFTU,
"oxsat": self.sectionOXS}[varname]
# Save figures to file depending on what sort of plot (plot_type)
def save_to_file(self, CTDConfig, var_name, plot_type, work_dir, selected_depth=None, dateObjectStart=None,
dateObjectEnd=None):
figures_path = os.path.join(work_dir,'figures', CTDConfig.survey)
if not os.path.isdir(figures_path):
os.makedirs(figures_path)
if plot_type == "timeseries":
start = dateObjectStart.strftime("%Y%m%d")
stop = dateObjectEnd.strftime("%Y%m%d")
filename = f"timeseries-{self.name}-{var_name}-{start}-to-{stop}.png"
else:
filename = f"{plot_type}-{self.name}-{var_name}-{selected_depth}.png"
plotfileName = os.path.join(figures_path, filename)
if os.path.exists(plotfileName):
os.remove(plotfileName)
print("Saving time series to file {}".format(plotfileName))
plt.savefig(plotfileName, dpi=300, bbox_inches='tight')
plt.close()
def get_depthindex(self, Y, max_depth, ax, varname):
if float(max_depth) > 250:
ax.set_ylim(-250, 0)
depthindex = np.where(Y == 250)[0][0]
        elif 150 < float(max_depth) < 190:
            ax.set_ylim(-150, 0)
            depthindex = np.where(Y == max_depth)[0][0]
        elif 120 < float(max_depth) < 150:
            ax.set_ylim(-120, 0)
            depthindex = np.where(Y == max_depth)[0][0]
else:
depthindex = -1
if self.name in ["S16", "S10", "SOE72", "Lind1"]:
ax.set_ylim(-40, 0)
depthindex = np.where(Y == 40)[0][0]
if self.name in ["S22"]:
ax.set_ylim(-25, 0)
depthindex = np.where(Y == 25)[0][0]
if self.name == "OKS1" or self.name == "OFOT1":
ax.set_ylim(-150, 0)
depthindex = np.where(Y == 150)[0][0]
if self.name == "NORD2":
ax.set_ylim(-225, 0)
depthindex = np.where(Y == 225)[0][0]
if self.name in ["VT79"]:
ax.set_ylim(-500, 0)
depthindex = np.where(Y == 450)[0][0]
if self.name in ["VT69"]:
ax.set_ylim(-20, 0)
depthindex = np.where(Y == 20)[0][0]
if self.name in ["VT70"]:
ax.set_ylim(-590, 0)
depthindex = np.where(Y == 590)[0][0]
if self.name in ["VT75"]:
ax.set_ylim(-180, 0)
depthindex = np.where(Y == 180)[0][0]
if self.name in ["VT52"]:
ax.set_ylim(-370, 0)
depthindex = np.where(Y == 370)[0][0]
if self.name in ["VT74"]:
ax.set_ylim(-230, 0)
depthindex = np.where(Y == 230)[0][0]
if self.name in ["VT53"]:
ax.set_ylim(-850, 0)
depthindex = np.where(Y == 850)[0][0]
if self.name in ["VT16"] and varname in ["oxsat", "ox"]:
ax.set_ylim(-1250, 0)
depthindex = np.where(Y == 1250)[0][0]
if self.name in ["VT16"] and varname in ["temp", "salt"]:
ax.set_ylim(-300, 0)
depthindex = np.where(Y == 300)[0][0]
if varname in ["ftu"]:
if ax.get_ylim()[0] < -100:
ax.set_ylim(-100, 0)
depthindex = np.where(Y == 100)[0][0]
return depthindex
def createHistoricalTimeseries(self, CTDConfig,work_dir):
dates = [num2date(jd, units=CTDConfig.refdate, calendar="standard") for jd in self.julianDay]
# Either we plot all the years or a selection (e.g. years=[2017,2018])
startdate = dates[0]
enddate = dates[-1]
steps = (enddate.year - startdate.year) + 1
years = [int(startdate.year) + i for i in range(steps)]
n_months = 12
smooth = False
print("Using smoothing for timeseries: {}".format(smooth))
if self.name in ["VT69", "VT70"]:
if self.name in ["VT69"]:
CTDConfig.selected_depths = [5]
elif self.name in ["VT70"]:
CTDConfig.selected_depths = [5, 100, 200]
print("RUNNING HISTORICAL ", self.name, CTDConfig.selected_depths)
for var_name in ["temp", "salt"]: # ,"salt","ox"]:
for sub_index, selected_depth in enumerate(CTDConfig.selected_depths):
fig, ax = plt.subplots(nrows=1)
colormap = plt.cm.plasma
colors = [colormap(i) for i in np.linspace(0, 0.9, len(years))]
# Size of data is number of years for data storage
all_data = np.empty((len(years), n_months))
Z = self.define_array_for_variable(var_name)
depthindex = np.where(self.Y == selected_depth)[0][0]
all_dates = np.zeros((len(years), 12))
print("STATION {} DEPTH {} FOUND DEPTH {}".format(self.name, selected_depth, self.Y[depthindex]))
# Extract only data for each individual year and save by month
for ind, d in enumerate(dates):
year_index = years.index(d.year)
all_data[year_index, int(d.month - 1)] = Z[ind, depthindex]
all_dates[year_index, d.month - 1] = self.julianDay[ind]
# Mask the data to remove months where observations may not exist
all_data = np.ma.masked_where(all_data < 0.1, all_data)
if var_name in ["salt"]:
all_data = np.ma.masked_where(all_data < 28, all_data)
if var_name in ["salt", "temp"]:
# Salinity above 35.2 not likely in inner fjords.
all_data = np.ma.masked_where(all_data > 35.2, all_data)
all_data = np.ma.masked_invalid(all_data)
legendlist = []
# Create the smoothed series of data and plot
for ind in range(len(years)):
changed_dates = []
legendlist.append("{}".format(years[ind]))
alld = num2date(all_dates[ind, :], units=CTDConfig.refdate, calendar="standard")
# Mock the year in dates so all plots from different years can be using the same
# x-axis and be overlaid on top of eachother
for d in alld:
changed_dates.append(datetime.datetime(years[0], d.month, d.day, d.hour))
# Smooth the data by creating a pandas Series object which is resampled at high frequency
ser = pd.Series(all_data[ind, :], index=pd.to_datetime(changed_dates))
ser = ser.dropna()
if smooth:
smoothed = ser.resample("60T").apply(['median'])
tsint = smoothed.interpolate(method='cubic')
# Get the vmin and vmax limits for the specific plot
if selected_depth < 10:
specs = self.define_variable_specifics_3m(var_name)
else:
specs = self.define_variable_specifics_100_200m(var_name)
# ser=ser.mask(ser>36)
ax.set_ylim(float(specs["vmin"]), float(specs["vmax"]))
if not smooth:
ax = ser.loc['2013-01-01':'2030-01-01'].plot(color=colors[ind], lw=2.2, ax=ax,
label=str(years[ind]))
# SMOOTHED version uncomment next line
if smooth:
ax = tsint.loc['2013-01-01':'2020-01-01'].plot(color=colors[ind], ax=ax, lw=2.2,
label=str(years[ind]), x_compat=True)
# SHOW DOTS FOR original positions uncomment next line
# ax[ind] = ser.loc['2017-01-01':'2030-01-01'].plot(style="o",ms=5,ax=ax) #,label=str(years[ind]))
ax.xaxis.set_tick_params(reset=True)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter(''))
# Make sure the month names are in Norwegian
locale.setlocale(locale.LC_ALL, "no_NO")
leg = ax.legend(legendlist, loc=1, prop={'size': 10})
label = " {}m".format(selected_depth)
ax.text(0.1, 0.1,
label,
size=12,
horizontalalignment='right',
verticalalignment='bottom',
transform=ax.transAxes)
ax.xaxis.set_tick_params(reset=True)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.set_xlabel("Dato")
ax.set_ylabel(specs["ylabel"], multialignment='center')
ax.set_title(specs["title"])
plt.tight_layout()
if not smooth:
self.save_to_file(CTDConfig, var_name, 'annual_variability_historical',work_dir,
selected_depth=str(selected_depth))
else:
self.save_to_file(CTDConfig, var_name, 'annual_variability_historical_smoothed',work_dir,
selected_depth=str(selected_depth))
plt.clf()
def createTimeseriesPlot(self, CTDConfig):
dates = [num2date(jd, units=CTDConfig.refdate, calendar="standard") for jd in self.julianDay]
if len(dates)> 0:
# Either we plot all the years or a selection (e.g. years=[2017,2018])
startdate = dates[0]
enddate = dates[-1]
steps = (enddate.year - startdate.year) + 1
years = [int(startdate.year) + i for i in range(steps)]
n_months = 12
if self.get_max_depth() < 100:
CTDConfig.selected_depths = [5]
elif self.get_max_depth() < 200:
CTDConfig.selected_depths = [5, 100]
elif self.get_max_depth() < 300:
CTDConfig.selected_depths = [5, 100, 200]
for var_name in ["temp","salt"]: # ,"salt","ox"]:
fig, ax = plt.subplots(nrows=3)
if self.name in ["VT75"]:
fig, ax = plt.subplots(nrows=2)
colormap = plt.cm.plasma
colors = [colormap(i) for i in np.linspace(0, 0.9, len(years))]
legendlist = []
# Loop over all depths to be plotted as individual figures
for sub_index, selected_depth in enumerate(CTDConfig.selected_depths):
# Size of data is number of years for data storage
all_data = np.empty((len(years), n_months))
Z = self.define_array_for_variable(var_name)
depthindex = np.where(self.Y == selected_depth)[0][0]
all_dates = np.zeros((len(years), 12))
# Extract only data for each individual year and save by month
for ind, d in enumerate(dates):
year_index = years.index(d.year)
all_data[year_index, int(d.month - 1)] = Z[ind, depthindex]
all_dates[year_index, d.month - 1] = self.julianDay[ind]
# Mask the data to remove months where observations may not exist
all_data = np.ma.masked_where(all_data < 0.1, all_data)
all_data = np.ma.masked_where(all_data > 150, all_data)
# Create the smoothed series of data and plot
for ind in range(len(years)):
changed_dates = []
legendlist.append("{}".format(years[ind]))
alld = num2date(all_dates[ind, :], units=CTDConfig.refdate, calendar="standard")
                        # Mock the year in the dates so plots from different years can share the same
                        # x-axis and be overlaid on top of each other
for d in alld:
changed_dates.append(datetime.datetime(years[0], d.month, d.day, d.hour))
# Smooth the data by creating a pandas Series object which is resampled at high frequency
ser = pd.Series(all_data[ind, :], index=pd.to_datetime(changed_dates))
# smoothed = ser.resample("60T").apply(['median'])
# tsint = smoothed.interpolate(method='cubic')
# Get the vmin and vmax limits for the specific plot
if selected_depth < 10:
specs = self.define_variable_specifics_3m(var_name)
else:
specs = self.define_variable_specifics_100_200m(var_name)
# ser=ser.mask(ser>36)
ax[sub_index].set_ylim(float(specs["vmin"]), float(specs["vmax"]))
ax[sub_index] = ser.loc['2013-01-01':'2030-01-01'].plot(color=colors[ind], lw=1.5, ax=ax[sub_index],
label=str(years[ind]))
# SMOOTHED version uncomment next line
# ax[ind] = tsint.loc['2017-01-01':'2030-01-01'].plot(color=colors[ind],ax=ax,lw=2,label=str(years[ind]),x_compat=True)
# SHOW DOTS FOR original positions uncomment next line
# ax[ind] = ser.loc['2017-01-01':'2030-01-01'].plot(style="o",ms=5,ax=ax) #,label=str(years[ind]))
ax[sub_index].xaxis.set_tick_params(reset=True)
ax[sub_index].xaxis.set_major_locator(mdates.MonthLocator())
ax[sub_index].xaxis.set_major_formatter(mdates.DateFormatter(''))
# Make sure the month names are in Norwegian
locale.setlocale(locale.LC_ALL, "no_NO")
if self.name in ["VT75"]:
leg = ax[1].legend(legendlist, loc=1, prop={'size': 10})
else:
leg = ax[2].legend(legendlist, loc=1, prop={'size': 10})
for j, selected_depth in enumerate(CTDConfig.selected_depths):
label = " {}m".format(selected_depth)
if self.name in ["VT75"]:
yy=1.15
else:
yy=1.5
ax[j].text(0.1, yy,
label,
size=12,
horizontalalignment='right',
verticalalignment='top',
transform=ax[j].transAxes)
if self.name in ["VT75"]:
ax[1].xaxis.set_tick_params(reset=True)
ax[1].xaxis.set_major_locator(mdates.MonthLocator())
ax[1].xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax[1].set_xlabel("Dato")
else:
ax[2].xaxis.set_tick_params(reset=True)
ax[2].xaxis.set_major_locator(mdates.MonthLocator())
ax[2].xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax[2].set_xlabel("Dato")
ax[1].set_ylabel(specs["ylabel"], multialignment='center')
if self.survey not in ['Soerfjorden']:
ax[0].set_title(specs["title"])
plt.tight_layout()
self.save_to_file(CTDConfig, var_name,
'annual_variability',
CTDConfig.work_dir,
selected_depth=str(selected_depth))
plt.clf()
else:
print('Empty dates', CTDConfig)
def createContourPlots(self, CTDConfig):
xticklabels = []
yticklabels = []
for d, dd in enumerate(self.julianDay):
dateObject = num2date(self.julianDay[d], units=CTDConfig.refdate, calendar="standard")
self.X[d] = self.julianDay[d]
xticklabels.append(str(dateObject.year) + "-" + str(dateObject.month))
for yy in self.Y: yticklabels.append(str(-(np.max(self.Y) - yy)))
plt.set_cmap('RdYlBu_r')
varNames = ["temp", "salt", "ox", "ftu", "oxsat"]
if self.name in ['SJON1', 'SJON2']:
varNames = ["temp", "salt", "ox", "oxsat"]
for i in range(len(varNames)):
plt.clf()
fig, ax = plt.subplots()
# Get data and settings for station
specs = self.define_variable_specifics(varNames[i])
Z = self.define_array_for_variable(varNames[i])
Z = np.ma.masked_where(Z < 0, Z)
if varNames[i] == "ftu":
Z = np.ma.masked_where(Z > 20, Z)
delta = (specs["vmax"] - specs["vmin"]) / 15
            levels = np.arange(specs["vmin"], specs["vmax"], delta)
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from test.bigdl.test_zoo_utils import ZooTestCase
import bigdl.dllib.keras.layers as ZLayer
from bigdl.dllib.keras.models import Model as ZModel
from bigdl.dllib.keras.models import Sequential as ZSequential
import keras.layers as KLayer
from keras.engine import merge as kmerge, Model as KModel
from keras.models import Sequential as KSequential
import keras.backend as K
from bigdl.dllib.keras.converter import WeightsConverter
from bigdl.dllib.keras import regularizers
np.random.seed(1337) # for reproducibility
class TestLayer(ZooTestCase):
def test_embedding(self):
input_data = np.random.randint(1000, size=(32, 10))
zlayer = ZLayer.Embedding(1000, 64, input_shape=(10, ))
klayer = KLayer.Embedding(1000, 64, input_length=10)
self.compare_layer(klayer, zlayer, input_data,
WeightsConverter.convert_embedding)
def test_batchnormalization(self):
print("Running batch normal test")
K.set_image_dim_ordering("th")
input_data = np.random.random_sample([2, 5, 32, 32])
zlayer = ZLayer.BatchNormalization(axis=1, input_shape=(5, 32, 32))
klayer = KLayer.BatchNormalization(axis=1, input_shape=(5, 32, 32))
self.compare_layer(klayer, zlayer, input_data,
WeightsConverter.convert_batchnormalization)
K.set_image_dim_ordering("tf")
input_data2 = np.random.random_sample([2, 32, 32, 4])
zlayer = ZLayer.BatchNormalization(axis=-1, dim_ordering="tf", input_shape=(32, 32, 4))
klayer = KLayer.BatchNormalization(axis=-1, input_shape=(32, 32, 4))
self.compare_layer(klayer, zlayer, input_data2,
WeightsConverter.convert_batchnormalization)
K.set_image_dim_ordering("th")
input_data = np.random.random_sample([2, 5])
zlayer = ZLayer.BatchNormalization(axis=1, input_shape=(5,))
klayer = KLayer.BatchNormalization(axis=1, input_shape=(5,))
self.compare_layer(klayer, zlayer, input_data,
WeightsConverter.convert_batchnormalization)
def test_merge_sum(self):
z1 = ZLayer.InputLayer(input_shape=(3, 5))
z2 = ZLayer.InputLayer(input_shape=(3, 5))
zlayer = ZLayer.Merge(layers=[z1, z2], mode="sum")
k1 = KLayer.InputLayer(input_shape=(3, 5))
k2 = KLayer.InputLayer(input_shape=(3, 5))
klayer = KLayer.Merge(layers=[k1, k2], mode="sum")
input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_mul(self):
z1 = ZLayer.InputLayer(input_shape=(3, 5))
z2 = ZLayer.InputLayer(input_shape=(3, 5))
zlayer = ZLayer.Merge(layers=[z1, z2], mode="mul")
k1 = KLayer.InputLayer(input_shape=(3, 5))
k2 = KLayer.InputLayer(input_shape=(3, 5))
klayer = KLayer.Merge(layers=[k1, k2], mode="mul")
input_data = [np.random.random([2, 3, 5]), np.random.random([2, 3, 5])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_ave(self):
z1 = ZLayer.InputLayer(input_shape=(2, 5, 8))
z2 = ZLayer.InputLayer(input_shape=(2, 5, 8))
zlayer = ZLayer.Merge(layers=[z1, z2], mode="ave")
k1 = KLayer.InputLayer(input_shape=(2, 5, 8))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode="ave")
input_data = [np.random.random([3, 2, 5, 8]), np.random.random([3, 2, 5, 8])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_max(self):
z1 = ZLayer.InputLayer(input_shape=(2, 5, 8))
z2 = ZLayer.InputLayer(input_shape=(2, 5, 8))
zlayer = ZLayer.Merge(layers=[z1, z2], mode="max")
k1 = KLayer.InputLayer(input_shape=(2, 5, 8))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode="max")
input_data = [np.random.random([3, 2, 5, 8]), np.random.random([3, 2, 5, 8])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_concat(self):
z1 = ZLayer.InputLayer(input_shape=(2, 5, 11))
z2 = ZLayer.InputLayer(input_shape=(2, 5, 8))
zlayer = ZLayer.Merge(layers=[z1, z2], mode="concat")
k1 = KLayer.InputLayer(input_shape=(2, 5, 11))
k2 = KLayer.InputLayer(input_shape=(2, 5, 8))
klayer = KLayer.Merge(layers=[k1, k2], mode="concat")
input_data = [np.random.random([3, 2, 5, 11]), np.random.random([3, 2, 5, 8])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_dot(self):
z1 = ZLayer.InputLayer(input_shape=(4, ))
z2 = ZLayer.InputLayer(input_shape=(4, ))
zlayer = ZLayer.Merge(layers=[z1, z2], mode="dot")
k1 = KLayer.InputLayer(input_shape=(4, ))
k2 = KLayer.InputLayer(input_shape=(4, ))
klayer = KLayer.Merge(layers=[k1, k2], mode="dot")
input_data = [np.random.random([2, 4]), np.random.random([2, 4])]
self.compare_layer(klayer, zlayer, input_data)
def test_merge_cos(self):
z1 = ZLayer.InputLayer(input_shape=(3, ))
z2 = ZLayer.InputLayer(input_shape=(3, ))
zlayer = ZLayer.Merge(layers=[z1, z2], mode="cos")
k1 = KLayer.InputLayer(input_shape=(3, ))
k2 = KLayer.InputLayer(input_shape=(3, ))
klayer = KLayer.Merge(layers=[k1, k2], mode="cos")
input_data = [np.random.random([2, 3]), np.random.random([2, 3])]
self.compare_layer(klayer, zlayer, input_data)
def convert_two_dense(self, kmodel, weights):
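        # Keras exposes the two Dense layers' weights as [W1, b1, W2, b2]; reorder to
        # second-Dense-first with transposed kernels, which is the order the Zoo graph expects here.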
return [weights[2].T, weights[3], weights[0].T, weights[1]]
def test_merge_method_sum(self):
zx1 = ZLayer.Input(shape=(8, ))
zx2 = ZLayer.Input(shape=(6, ))
zy1 = ZLayer.Dense(10)(zx1)
zy2 = ZLayer.Dense(10)(zx2)
zz = ZLayer.merge([zy1, zy2], mode="sum")
zmodel = ZModel([zx1, zx2], zz, name="graph1")
kx1 = KLayer.Input(shape=(8, ))
kx2 = KLayer.Input(shape=(6, ))
ky1 = KLayer.Dense(10)(kx1)
ky2 = KLayer.Dense(10)(kx2)
kz = kmerge([ky1, ky2], mode="sum")
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 8]), np.random.random([2, 6])]
self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
def test_merge_method_model_concat(self):
zx1 = ZLayer.Input(shape=(4, ))
zx2 = ZLayer.Input(shape=(5, ))
zy1 = ZLayer.Dense(6, activation="sigmoid")(zx1)
zbranch1 = ZModel(zx1, zy1)(zx1)
zbranch2 = ZLayer.Dense(8)(zx2)
zz = ZLayer.merge([zbranch1, zbranch2], mode="concat")
zmodel = ZModel([zx1, zx2], zz)
kx1 = KLayer.Input(shape=(4, ))
kx2 = KLayer.Input(shape=(5, ))
ky1 = KLayer.Dense(6, activation="sigmoid")(kx1)
kbranch1 = KModel(kx1, ky1)(kx1)
kbranch2 = KLayer.Dense(8)(kx2)
kz = KLayer.merge([kbranch1, kbranch2], mode="concat")
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 4]), np.random.random([2, 5])]
self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
def test_merge_method_seq_concat(self):
zx1 = ZLayer.Input(shape=(10, ))
zx2 = ZLayer.Input(shape=(10, ))
zy1 = ZLayer.Dense(12, activation="sigmoid")(zx1)
zbranch1_node = ZModel(zx1, zy1)(zx1)
zbranch2 = ZSequential()
zbranch2.add(ZLayer.Dense(12, input_dim=10))
zbranch2_node = zbranch2(zx2)
zz = ZLayer.merge([zbranch1_node, zbranch2_node], mode="concat")
zmodel = ZModel([zx1, zx2], zz)
kx1 = KLayer.Input(shape=(10, ))
kx2 = KLayer.Input(shape=(10, ))
ky1 = KLayer.Dense(12, activation="sigmoid")(kx1)
kbranch1_node = KModel(kx1, ky1)(kx1)
kbranch2 = KSequential()
kbranch2.add(KLayer.Dense(12, input_dim=10))
kbranch2_node = kbranch2(kx2)
kz = KLayer.merge([kbranch1_node, kbranch2_node], mode="concat")
kmodel = KModel([kx1, kx2], kz)
input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
def test_reshape(self):
a = np.random.random((2, 2, 3, 4))
i1 = ZLayer.Input(shape=(2, 3, 4))
s = ZLayer.Reshape((-1, 2, 12))(i1)
m = ZModel(i1, s)
# predict should not generate exception
y = m.predict(a, distributed=False)
def test_regularizer(self):
model = ZSequential()
model.add(ZLayer.Dense(16, W_regularizer=regularizers.l2(0.001),
activation='relu', input_shape=(10000,)))
model.summary()
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
def test_transformer_forward_backward(self):
layer = ZLayer.TransformerLayer.init(
vocab=200, hidden_size=128, n_head=4, seq_len=20)
train_token = np.random.randint(20, size=(2, 20))
train_pos = np.zeros((2, 20), dtype=np.int32)
input = [train_token, train_pos]
self.assert_forward_backward(layer, input)
def test_bert_forward_backward(self):
layer = ZLayer.BERT.init(
vocab=200, hidden_size=128, n_head=4, seq_len=20, intermediate_size=20)
train_token = np.random.randint(20, size=(2, 20))
token_type_id = np.zeros((2, 20), dtype=np.int32)
        train_pos = np.zeros((2, 20), dtype=np.int32)
# Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
============
Traits Types
============
.. currentmodule:: bqplot.traits
.. autosummary::
:toctree: _generate/
Date
"""
from traitlets import Instance, TraitError, TraitType, Undefined
import traittypes as tt
import numpy as np
import pandas as pd
import warnings
import datetime as dt
import six
import warnings
# Date
def date_to_json(value, obj):
if value is None:
return value
else:
return value.strftime('%Y-%m-%dT%H:%M:%S.%f')
def date_from_json(value, obj):
if value:
return dt.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
else:
return value
date_serialization = dict(to_json=date_to_json, from_json=date_from_json)
class Date(TraitType):
"""
A datetime trait type.
Converts the passed date into a string format that can be used to
construct a JavaScript datetime.
"""
def validate(self, obj, value):
try:
if isinstance(value, dt.datetime):
return value
if isinstance(value, dt.date):
return dt.datetime(value.year, value.month, value.day)
if np.issubdtype(np.dtype(value), np.datetime64):
# TODO: Fix this. Right now, we have to limit the precision
# of time to microseconds because np.datetime64.astype(datetime)
# returns date values only for precision <= 'us'
value_truncated = np.datetime64(value, 'us')
return value_truncated.astype(dt.datetime)
except Exception:
self.error(obj, value)
self.error(obj, value)
def __init__(self, default_value=dt.datetime.today(), **kwargs):
args = (default_value,)
self.default_value = default_value
super(Date, self).__init__(args=args, **kwargs)
self.tag(**date_serialization)
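# Usage sketch (assuming a traitlets.HasTraits host, e.g. a bqplot scale or mark):
# class Example(HasTraits):
#     start = Date()
# Example(start=dt.datetime(2020, 1, 1))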
def convert_to_date(array, fmt='%m-%d-%Y'):
# If array is a np.ndarray with type == np.datetime64, the array can be
# returned as such. If it is an np.ndarray of dtype 'object' then conversion
# to string is tried according to the fmt parameter.
if(isinstance(array, np.ndarray) and np.issubdtype(array.dtype, np.datetime64)):
# no need to perform any conversion in this case
return array
elif(isinstance(array, list) or (isinstance(array, np.ndarray) and array.dtype == 'object')):
return_value = []
# Pandas to_datetime handles all the cases where the passed in
# data could be any of the combinations of
# [list, nparray] X [python_datetime, np.datetime]
        # Because of errors='coerce', any non-compatible datetime type
# will be converted to pd.NaT. By this comparison, we can figure
# out if it is date castable or not.
if(len(np.shape(array)) == 2):
for elem in array:
temp_val = pd.to_datetime(
elem, errors='coerce', box=False, infer_datetime_format=True)
temp_val = elem if (
temp_val[0] == np.datetime64('NaT')) else temp_val
return_value.append(temp_val)
elif(isinstance(array, list)):
temp_val = pd.to_datetime(
array, errors='coerce', box=False, infer_datetime_format=True)
return_value = array if (
temp_val[0] == np.datetime64('NaT')) else temp_val
else:
temp_val = pd.to_datetime(
array, errors='coerce', box=False, infer_datetime_format=True)
temp_val = array if (
temp_val[0] == np.datetime64('NaT')) else temp_val
return_value = temp_val
return return_value
elif(isinstance(array, np.ndarray)):
warnings.warn("Array could not be converted into a date")
return array
def array_from_json(value, obj=None):
if value is not None:
        # this will accept regular json data, like an array of values, which can be useful if you want
# to link bqplot to other libraries that use that
if isinstance(value, list):
if len(value) > 0 and isinstance(value[0], dict) and 'value' in value[0]:
return np.array([array_from_json(k) for k in value])
else:
return np.array(value)
elif 'value' in value:
try:
ar = np.frombuffer(value['value'], dtype=value['dtype']).reshape(value['shape'])
except AttributeError:
# in some python27/numpy versions it does not like the memoryview
# we go the .tobytes() route, but since i'm not 100% sure memory copying
            # is happening or not, we only take this path if the above fails.
ar = np.frombuffer(value['value'].tobytes(), dtype=value['dtype']).reshape(value['shape'])
if value.get('type') == 'date':
assert value['dtype'] == 'float64'
ar = ar.astype('datetime64[ms]')
return ar
def array_to_json(ar, obj=None, force_contiguous=True):
if ar is None:
return None
if ar.dtype.kind in ['O']:
has_strings = False
all_strings = True # empty array we can interpret as an empty list
for el in ar:
if isinstance(el, six.string_types):
has_strings = True
else:
all_strings = False
if all_strings:
ar = ar.astype('U')
else:
if has_strings:
warnings.warn('Your array contains mixed strings and other types')
if ar.dtype.kind in ['S', 'U']: # strings to as plain json
return ar.tolist()
type = None
if ar.dtype.kind == 'M':
# since there is no support for int64, we'll use float64 but as ms
# resolution, since that is the resolution the js Date object understands
ar = ar.astype('datetime64[ms]').astype(np.float64)
type = 'date'
if ar.dtype.kind not in ['u', 'i', 'f']: # ints and floats, and datetime
raise ValueError("unsupported dtype: %s" % (ar.dtype))
# if ar.dtype == np.float64: # WebGL does not support float64, cast it here?
# ar = ar.astype(np.float32)
if ar.dtype == np.int64: # JS does not support int64
ar = ar.astype(np.int32)
if force_contiguous and not ar.flags["C_CONTIGUOUS"]: # make sure it's contiguous
ar = np.ascontiguousarray(ar)
if not ar.dtype.isnative:
dtype = ar.dtype.newbyteorder()
ar = ar.astype(dtype)
return {'value':memoryview(ar), 'dtype':str(ar.dtype), 'shape':ar.shape, 'type': type}
array_serialization = dict(to_json=array_to_json, from_json=array_from_json)
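# Round-trip sketch: array_from_json(array_to_json(np.arange(3.0))) recovers the original values;
# datetime64 arrays travel as float64 milliseconds with type='date' and come back as datetime64[ms].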
def array_squeeze(trait, value):
if len(value.shape) > 1:
        return np.squeeze(value)
    else:
        return value
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.ticker as mtick
from scipy.optimize import minimize_scalar
import filter
kBarWidth = 0.2
def fitLine(row, formantName, start, end, outputDir):
key = '@'.join([row['Filename'], row['Annotation'], formantName])
x = np.arange(2, 11)
y = row[formantName + '_' +
str(start): formantName + '_' + str(end)].to_numpy(dtype='float')
coeff = np.polyfit(x, y, 4)
line1 = np.poly1d(coeff)
line1d = np.polyder(line1, 1)
line1dd = np.polyder(line1, 2)
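    # The point where the second derivative of the fitted polynomial is largest on [2, 10] is taken
    # as the inflection; minimizing the negated derivative locates that maximum.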
line1dd_max = minimize_scalar(-line1dd, bounds=(2, 10), method='bounded')
inflection = line1dd_max.x
plt.plot(x, y, 'o')
plt.plot(x, line1(x), label='fitted line')
plt.plot(x, line1d(x), label='1st deriv')
plt.plot(x, line1dd(x), label='2nd deriv')
plt.axvline(x=inflection, linestyle='dashed', label='inflection')
plt.legend(loc='best')
plt.title(key)
# plt.show()
plt.savefig(outputDir / (key + '.png'))
plt.clf()
plt.cla()
# return pd.Series(coeff, index=['x4', 'x3', 'x2', 'x1', 'x0'])
return pd.Series(inflection, index=['Inflection_'+formantName])
def removeChars(s):
for c in [' ', '\\', '/', '^']:
s = s.replace(c, '')
return s
class Analyzer(object):
def RunAnalysis(self, df, group_name, output_base_dir):
raise NotImplementedError
def GetName(self):
raise NotImplementedError
class FormantQuantiles(Analyzer):
def GetName(self):
return "FormantQuantiles"
def GetInputType(self):
return "Formant"
def RunAnalysis(self, df, group_name, output_dir):
# output = df[['Filename']].copy()
# output['Annotation'] = df[['Annotation']]
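        # Summarize the formant track at three time points: ~25% (mean of samples 3 and 4),
        # 50% (sample 6) and ~75% (mean of samples 8 and 9), then take F1 - F2 at each point.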
df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
df['barkF1_50p'] = df[['barkF1_6']]
df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
df['barkF2_50p'] = df[['barkF2_6']]
        df['diff_F1F2_25p'] = df['barkF1_25p'] - df['barkF2_25p']
        df['diff_F1F2_50p'] = df['barkF1_50p'] - df['barkF2_50p']
        df['diff_F1F2_75p'] = df['barkF1_75p'] - df['barkF2_75p']
output_debug = pd.concat(
[df[['Filename']],
df[['Annotation']],
df.loc[:, df.columns.str.startswith("barkF1")],
df.loc[:, df.columns.str.startswith("barkF2")],
df.loc[:, df.columns.str.startswith("diff")],
], axis=1)
output = pd.DataFrame(
df.loc[:, df.columns.str.startswith("diff")].mean()).T
output_path = output_dir / (group_name + '.csv')
output_debug_path = output_dir / (group_name + '.debug.csv')
output_debug.to_csv(output_debug_path, index=False)
output.to_csv(output_path, index=False)
class FormantQuantilesByDemographic(Analyzer):
def GetName(self):
return "FormantQuantilesByDemographic"
def GetInputType(self):
return "Formant"
def RunAnalysis(self, df, outer_filters, inner_filters, group_name, output_dir):
for outer_f in outer_filters:
key = outer_f.GetValue()
matched_rows = dict()
for _, row in df.iterrows():
if not outer_f.IsMatched(row):
continue
for inner_f in inner_filters:
if inner_f.IsMatched(row):
matched_rows.setdefault(
inner_f.GetValue(), []).append(row)
if len(matched_rows) == 0:
continue
x = np.arange(3)
for k, v in matched_rows.items():
matched_df = pd.DataFrame(v)
full_group_name = group_name + '@' + outer_f.GetValue() + '@@' + k
df_mean = self.ComputeMean(
matched_df, full_group_name, output_dir)
y = [df_mean['diff_F1F2_25p'][0],
df_mean['diff_F1F2_50p'][0],
df_mean['diff_F1F2_75p'][0]]
plt.bar(x, y, width=kBarWidth, label=k)
x = [xval + kBarWidth for xval in x]
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.xticks([r + kBarWidth for r in range(3)],
('25%', '50%', '75%'))
plt.title(key)
plt.savefig(output_dir / (group_name + '@' +
key + '.png'), bbox_inches="tight")
plt.clf()
plt.cla()
def ComputeMean(self, df, full_group_name, output_dir):
df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
df['barkF1_50p'] = df[['barkF1_6']]
df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
df['barkF2_50p'] = df[['barkF2_6']]
df['diff_F1F2_25p'] = df['barkF1_25p'] - df['barkF2_25p']
df['diff_F1F2_50p'] = df['barkF1_50p'] - df['barkF2_50p']
df['diff_F1F2_75p'] = df['barkF1_75p'] - df['barkF2_75p']
output = pd.DataFrame(
df.loc[:, df.columns.str.startswith("diff")].mean()).T
output_path = output_dir / (full_group_name + '.csv')
output_debug_path = output_dir / (full_group_name + '.debug.csv')
output.to_csv(output_path, index=False)
df.to_csv(output_debug_path, index=False)
return output
class FormantRegression(Analyzer):
def GetName(self):
return "FormantRegression"
def GetInputType(self):
return "Formant"
def RunAnalysis(self, df, group_name, output_dir):
s_f1 = df.loc[:, df.columns.str.startswith("barkF1")].mean()
s_f2 = df.loc[:, df.columns.str.startswith("barkF2")].mean()
x = np.arange(0, 9)
y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
coeff1 = np.polyfit(x, y1, 4)
coeff2 = np.polyfit(x, y2, 4)
line1 = np.poly1d(coeff1)
line2 = np.poly1d(coeff2)
# line1d = np.polyder(line1, 1)
# line2d = np.polyder(line2, 1)
line1dd = np.polyder(line1, 2)
line2dd = np.polyder(line2, 2)
line1dd_max = minimize_scalar(-line1dd,
bounds=(0, 8), method='bounded')
line2dd_max = minimize_scalar(-line2dd,
bounds=(0, 8), method='bounded')
inflection1 = line1dd_max.x
inflection2 = line2dd_max.x
df_inflex = pd.DataFrame(
data={'f1_inflection': [inflection1], 'f2_inflection': [inflection2]})
df_inflex.to_csv(output_dir / (group_name + '.csv'), index=False)
# Plot f1/f2
plt.plot(x, y1, 'o')
plt.plot(x, y2, 'x')
plt.plot(x, line1(x), label='F1 fitted')
plt.plot(x, line2(x), label='F2 fitted')
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(group_name)
plt.savefig(output_dir / (group_name + '.fitted.png'),
bbox_inches="tight")
plt.clf()
plt.cla()
# plt.plot(x, line1d(x), label='F1 1st deriv')
# plt.plot(x, line2d(x), label='F2 1st deriv')
# Plot deriv and inflection
plt.plot(x, line1dd(x), label='F1 2nd deriv')
plt.plot(x, line2dd(x), label='F2 2nd deriv')
plt.axvline(x=inflection1, linestyle=':', label='F1 inflection')
plt.axvline(x=inflection2, linestyle='-.', label='F2 inflection')
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.title(group_name)
plt.savefig(output_dir / (group_name + '.inflection.png'),
bbox_inches="tight")
plt.clf()
plt.cla()
output_debug_path = output_dir / (group_name + '.debug.csv')
df.to_csv(output_debug_path, index=False)
class HnrRegression(Analyzer):
def GetName(self):
return "HnrRegression"
def GetInputType(self):
return "HNR"
def RunAnalysis(self, df, group_name, output_dir):
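        # Average adjacent HNR windows into mid-point estimates (mid_1..mid_9), then fit a
        # 4th-order polynomial to the group-mean curve.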
for i in range(1, 10):
df['mid_'+str(i)] = df[['HNR_'+str(i),
'HNR_'+str(i+1)]].mean(axis=1)
sy = df.loc[:, df.columns.str.startswith('mid_')].mean()
y = sy['mid_1': 'mid_9'].to_numpy(dtype='float')
x = np.arange(0, 9)
        coeff = np.polyfit(x, y, 4)
"""
integer converter: converts between integers and rgb encodings [red,green,blue]
image converter: converts images
"""
import numpy as np
TWO_TO_TWENTYFORUTH_MINUS_ONE = 16777215
def convert_to_rgb(an_int):
blue = an_int & 255
green = (an_int >> 8) & 255
red = (an_int >> 16) & 255
return np.uint8(red), np.uint8(green), np.uint8(blue)
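# e.g. convert_to_rgb(0xFF8000) == (255, 128, 0): red lives in the high byte, blue in the low byte.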
def convert_array_to_rgb(an_array_of_ints):
blue = an_array_of_ints & 255
green = (an_array_of_ints >> 8) & 255
red = (an_array_of_ints >> 16) & 255
rgb_image=np.dstack((red,green,blue)).astype(np.uint8)
return rgb_image
def convert_to_rainbow(an_int):
COLOR_BIN=TWO_TO_TWENTYFORUTH_MINUS_ONE//7
slope=255/COLOR_BIN
try:
# from black...ramp-up blue in first bin
if an_int<COLOR_BIN:
x=an_int
blue=x*slope
green=0
red=0
# ramp-up green in second bin
elif an_int>=COLOR_BIN and an_int<COLOR_BIN*2:
x=an_int-COLOR_BIN
blue=255
green=x*slope
red=0
# ramp-down blue in third bin
elif an_int>=COLOR_BIN*2 and an_int<COLOR_BIN*3:
x=an_int-2*COLOR_BIN
blue=255-x*slope
green=255
red=0
# ramp-up red in the fourth bin
elif an_int>=COLOR_BIN*3 and an_int<COLOR_BIN*4:
x=an_int-3*COLOR_BIN
blue=0
green=255
red=x*slope
#ramp-down green in 5th bin
elif an_int>=COLOR_BIN*4 and an_int<COLOR_BIN*5:
x=an_int-4*COLOR_BIN
blue=0
green=255-x*slope
red=255
#ramp-up blue in 6th bin
elif an_int>=COLOR_BIN*5 and an_int<COLOR_BIN*6:
x=an_int-5*COLOR_BIN
blue=x*slope
green=0
red=255
#ramp-up green (white=highest)
elif an_int>=COLOR_BIN*6 and an_int<COLOR_BIN*7:
x=an_int-6*COLOR_BIN
blue=255
green=x*slope
red=255
except:
        print('Invalid range.')
return np.uint8(red), np.uint8(green), np.uint8(blue)
def convert_array_to_rainbow(an_array_of_ints):
COLOR_BIN=TWO_TO_TWENTYFORUTH_MINUS_ONE//7
slope=255/COLOR_BIN
blue=np.zeros(an_array_of_ints.shape)
    green=np.zeros(an_array_of_ints.shape)
"""
Computes the Orbital-Optimized LCCD correlation energy.
Equations taken from [Bozkaya:2013:054104].
__authors__ = "<NAME>"
__credits__ = ["<NAME>"]
__copyright__ = "(c) 2014-2020, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
"""
import numpy as np
import psi4
from scipy import linalg as spla
from DSD import DirectSumDiis
from integrals import integrals
### Settings
mol = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 104.5
symmetry c1""")
scf_type = "pk"
target_convergence = 7
maxiter = 50
compare_psi4 = True
basis = "cc-pvdz"
### Setup
psi4.set_options({"scf_type": scf_type, "e_convergence": target_convergence + 1, "basis": basis})
I, F, intermed = integrals(mol, singles=True, return_intermediates=True)
t1 = np.zeros(F["ov"].shape)
t2 = np.zeros(I["oovv"].shape)
dsd = DirectSumDiis(3, 8)
num_occ, num_vir = t1.shape
# We need the initial one-electron integrals as well for orbital-optimized methods.
# The orbital gradient expression requires these instead of the Fock integrals that our amplitude expressions need.
H = {
"oo": np.einsum('pP, qQ, pq -> PQ', intermed["O"], intermed["O"], intermed["OEI"], optimize = True),
"ov": np.einsum('pP, qQ, pq -> PQ', intermed["O"], intermed["V"], intermed["OEI"], optimize = True),
"vv": np.einsum('pP, qQ, pq -> PQ', intermed["V"], intermed["V"], intermed["OEI"], optimize = True)
}
Escf = mol.nuclear_repulsion_energy() + np.trace(H["oo"]) + 0.5 * np.einsum("ijij ->", I["oooo"], optimize = True)
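# Note: Escf above is the mean-field reference energy in the current orbital basis,
# E = E_nuc + sum_i h_ii + (1/2) sum_ij <ij||ij>, assuming I["oooo"] holds
# antisymmetrized two-electron integrals over occupied spin orbitals.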
### Main Loop
for i in range(1, maxiter + 1):
if i != 1:
# Compute new orbitals and transform into them. See Section IIA5 of Bozkaya.
Zoo = np.zeros((num_occ, num_occ))
        Zvv = np.zeros((num_vir, num_vir))
import torch
from torch.utils.data import TensorDataset, Dataset
from torch.utils.data import DataLoader
import numpy as np
class Dataset_V5(Dataset):
def __init__(self,X_signal,X_ci,Y,Y_mask,final_len_x,diff_window_ppg,device):
self.X_signal = X_signal
self.X_ci = X_ci
self.Y = Y
self.Y_mask = Y_mask
self.final_len_x = final_len_x
self.diff_window_ppg = diff_window_ppg
self.device = device
def __getitem__(self, index):
x = self.X_signal[index]
x_ci = self.X_ci[index]
y = self.Y[index]
y_mask = self.Y_mask[index]
        # Shift (crop) the signal by a random offset
idx_roll = np.random.randint(0, self.diff_window_ppg)
x = x[:,idx_roll:idx_roll+self.final_len_x]
x_s = x[0:1,:]
        # Finally, normalize the signal to [0, 1]
a_max = np.amax(x_s, axis=1)
a_min = np.amin(x_s, axis=1)
x_s = (x_s - a_min[None,:]) / (a_max[None,:] - a_min[None,:])
x_d1 = x[1:2,:]
a_max = np.amax(x_d1, axis=1)
a_min = np.amin(x_d1, axis=1)
x_d1 = (x_d1 - a_min[None,:]) / (a_max[None,:] - a_min[None,:])
x = np.concatenate((x_s,x_d1))
x = torch.from_numpy(x).float().to(self.device)
x_ci = torch.from_numpy(x_ci).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
y_mask = torch.from_numpy(y_mask).float().to(self.device)
return (x,x_ci,y,y_mask)
def __len__(self):
return len(self.X_signal)
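# Example usage (sketch; the arrays and window sizes below are hypothetical):
#   ds = Dataset_V5(X_signal, X_ci, Y, Y_mask, final_len_x=1000,
#                   diff_window_ppg=125, device='cpu')
#   loader = DataLoader(ds, batch_size=32, shuffle=True)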
class Dataset_V6(Dataset):
def __init__(self,X_signal,Y,Y_mask,final_len_x,diff_window_ppg,device):
self.X_signal = X_signal
self.Y = Y
self.Y_mask = Y_mask
self.final_len_x = final_len_x
self.diff_window_ppg = diff_window_ppg
self.device = device
def __getitem__(self, index):
x = self.X_signal[index]
y = self.Y[index]
y_mask = self.Y_mask[index]
        # Shift (crop) the signal by a random offset
idx_roll = np.random.randint(0, self.diff_window_ppg)
x = x[:,idx_roll:idx_roll+self.final_len_x]
x_s = x[0:1,:]
        # Finally, normalize the signal to [0, 1]
a_max = np.amax(x_s, axis=1)
a_min = np.amin(x_s, axis=1)
x_s = (x_s - a_min[None,:]) / (a_max[None,:] - a_min[None,:])
x_d1 = x[1:2,:]
a_max = np.amax(x_d1, axis=1)
        a_min = np.amin(x_d1, axis=1)
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 11:54:59 2020
@author: binod
"""
# Author: <NAME> <<EMAIL>>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from astroML.datasets import fetch_sdss_galaxy_colors
from astroML.plotting import scatter_contour
n_neighbors = 1
data = fetch_sdss_galaxy_colors()
N = len(data)
# shuffle data
np.random.seed(0)
np.random.shuffle(data)
# put colors in a matrix
X = np.zeros((N, 4))
X[:, 0] = data['u'] - data['g']
X[:, 1] = data['g'] - data['r']
X[:, 2] = data['r'] - data['i']
X[:, 3] = data['i'] - data['z']
z = data['redshift']
# divide into training and testing data
Ntrain = N // 2
Xtrain = X[:Ntrain]
ztrain = z[:Ntrain]
Xtest = X[Ntrain:]
ztest = z[Ntrain:]
knn = KNeighborsRegressor(n_neighbors, weights='uniform')
zpred = knn.fit(Xtrain, ztrain).predict(Xtest)
axis_lim = np.array([-0.1, 2.5])
rms = np.sqrt(np.mean((ztest - zpred) ** 2))
import numpy as np
from .LowResHighResDataset import LowResHighResDataset, region_geometry
class NearestNeighborData(LowResHighResDataset):
def __init__(self, dataset: LowResHighResDataset, num_models=None, model_index=None, k=16):
super(NearestNeighborData, self).__init__(
dataset.geometry_lr, dataset.geometry_hr,
dataset.grid_names_lr, dataset.grid_names_hr,
dataset.grid_names_target
)
if isinstance(self.geometry_lr, dict):
            assert len(self.geometry_lr.keys()) == 1, '[ERROR] NearestNeighborData is not intended for use with multi-region datasets.'
self.geometry_lr = self.geometry_lr[list(self.geometry_lr.keys())[0]]
if isinstance(self.geometry_hr, dict):
            assert len(self.geometry_hr.keys()) == 1, '[ERROR] NearestNeighborData is not intended for use with multi-region datasets.'
self.geometry_hr = self.geometry_hr[list(self.geometry_hr.keys())[0]]
self.num_nearest_neighbors_lr = k
self.num_nearest_neighbors_hr = 12 * k
self._set_nearest_neighbor_indices(model_index, num_models)
self._read_dataset(dataset)
self._in_grid_mode = False
self._reset_mask_hr()
def _set_nearest_neighbor_indices(self, model_index, num_models):
mask_lr = self.geometry_lr.mask
lon_lr = self.geometry_lr.lon
lat_lr = self.geometry_lr.lat
mask_hr = self.geometry_hr.mask
lon_hr = self.geometry_hr.lon
lat_hr = self.geometry_hr.lat
max_num_models = np.sum(1 - mask_hr)
if num_models is None:
assert model_index is not None
assert len(model_index) <= max_num_models
assert max(model_index) < max_num_models
else:
if model_index is not None:
assert len(model_index) == num_models
assert len(model_index) <= max_num_models
assert max(model_index) < max_num_models
else:
model_index = np.arange(max_num_models).astype(int)
if num_models < max_num_models:
np.random.shuffle(model_index)
model_index = np.sort(model_index[:num_models])
self.num_models = len(model_index)
valid_lon_lr = lon_lr[mask_lr == 0]
valid_lat_lr = lat_lr[mask_lr == 0]
valid_lon_hr = lon_hr[mask_hr == 0]
valid_lat_hr = lat_hr[mask_hr == 0]
index_lon_lr, index_lat_lr = np.meshgrid(np.arange(self.shape_lr[1]), np.arange(self.shape_lr[0]))
index_lon_hr, index_lat_hr = np.meshgrid(np.arange(self.shape_hr[1]), np.arange(self.shape_hr[0]))
valid_index_lon_lr = index_lon_lr[mask_lr == 0].astype(int)
valid_index_lat_lr = index_lat_lr[mask_lr == 0].astype(int)
valid_index_lon_hr = index_lon_hr[mask_hr == 0].astype(int)
valid_index_lat_hr = index_lat_hr[mask_hr == 0].astype(int)
self.model_index_lon = valid_index_lon_hr[model_index]
self.model_index_lat = valid_index_lat_hr[model_index]
input_index_lon_lr = []
input_index_lat_lr = []
input_index_lon_hr = []
input_index_lat_hr = []
for i in model_index:
nn_dist = self._nearest_neighbor_metric(
lon=valid_lon_lr, lat=valid_lat_lr,
lon_0=valid_lon_hr[i], lat_0=valid_lat_hr[i]
)
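            # argpartition returns the indices of the k smallest distances
            # without a full sort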
rank_index_lr = np.argpartition(nn_dist, self.num_nearest_neighbors_lr)[:self.num_nearest_neighbors_lr]
input_index_lon_lr.append(valid_index_lon_lr[rank_index_lr])
input_index_lat_lr.append(valid_index_lat_lr[rank_index_lr])
self.input_index_lon_lr = np.array(input_index_lon_lr)
self.input_index_lat_lr = np.array(input_index_lat_lr)
for i in model_index:
nn_dist = self._nearest_neighbor_metric(
lon=valid_lon_hr, lat=valid_lat_hr,
lon_0=valid_lon_hr[i], lat_0=valid_lat_hr[i]
)
rank_index_hr = np.argpartition(nn_dist, self.num_nearest_neighbors_hr)[:self.num_nearest_neighbors_hr]
input_index_lon_hr.append(valid_index_lon_hr[rank_index_hr])
input_index_lat_hr.append(valid_index_lat_hr[rank_index_hr])
self.input_index_lon_hr = np.array(input_index_lon_hr)
self.input_index_lat_hr = np.array(input_index_lat_hr)
self.num_features = (
self.num_nearest_neighbors_lr * len(self.input_grids_lr()) +
self.num_nearest_neighbors_hr * len(self.input_grids_hr())
)
@staticmethod
def _nearest_neighbor_metric(lon=None, lat=None, lon_0=None, lat_0=None):
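        # Manhattan (L1) distance in lon/lat degrees; a cheap proxy for true
        # distance when only the ranking of neighbors matters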
return np.abs(lon - lon_0) + np.abs(lat - lat_0)
def _read_dataset(self, dataset: LowResHighResDataset):
self.input_lr = {'static': [], 'dynamic': []}
if len(self.grid_names_lr['dynamic']):
data, _ = dataset.get_input_lr(self.grid_names_lr['dynamic'])
for idx_lat, idx_lon in zip(self.input_index_lat_lr, self.input_index_lon_lr):
self.input_lr['dynamic'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
self.input_lr.update(
{
'dynamic': np.stack(self.input_lr['dynamic'], axis=1)
if len(self.input_lr['dynamic']) > 0 else None
}
)
if len(self.grid_names_lr['static']):
data, _ = dataset.get_input_lr(self.grid_names_lr['static'])
for idx_lat, idx_lon in zip(self.input_index_lat_lr, self.input_index_lon_lr):
self.input_lr['static'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
self.input_lr.update(
{
'static': np.stack(self.input_lr['static'], axis=1)
if len(self.input_lr['static']) > 0 else None
}
)
self.input_hr = {'static': [], 'dynamic': []}
if len(self.grid_names_hr['dynamic']):
data, _ = dataset.get_input_hr(self.grid_names_hr['dynamic'])
for idx_lat, idx_lon in zip(self.input_index_lat_hr, self.input_index_lon_hr):
self.input_hr['dynamic'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
self.input_hr.update(
{
'dynamic': np.stack(self.input_hr['dynamic'], axis=1)
if len(self.input_hr['dynamic']) > 0 else None
}
)
if len(self.grid_names_hr['static']):
data, _ = dataset.get_input_hr(self.grid_names_hr['static'])
for idx_lat, idx_lon in zip(self.input_index_lat_hr, self.input_index_lon_hr):
self.input_hr['static'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
self.input_hr.update(
{
'static': np.stack(self.input_hr['static'], axis=1)
if len(self.input_hr['static']) > 0 else None
}
)
self.target = {'static': [], 'dynamic': []}
if len(self.grid_names_target['dynamic']):
data, _ = dataset.get_target(self.grid_names_target['dynamic'])
for idx_lat, idx_lon in zip(self.model_index_lat, self.model_index_lon):
                self.target['dynamic'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
"""Rangeland Production Model."""
import os
import logging
import tempfile
import shutil
from builtins import range
import re
import math
import pickle
import numpy
import pandas
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import pygeoprocessing
from rangeland_production import utils
from rangeland_production import validation
LOGGER = logging.getLogger('rangeland_production.forage')
# we only have these types of soils
SOIL_TYPE_LIST = ['clay', 'silt', 'sand']
# temporary directory to store intermediate files
PROCESSING_DIR = None
# user-supplied crude protein of vegetation
CRUDE_PROTEIN = None
# state variables and parameters take their names from Century
# _SITE_STATE_VARIABLE_FILES contains state variables that are a
# property of the site, including:
# carbon in each soil compartment
# (structural, metabolic, som1, som2, som3) and layer (1=surface, 2=soil)
# e.g., som2c_2 = carbon in soil som2;
# N and P in each soil layer and compartment (1=N, 2=P)
# e.g., som2e_1_1 = N in surface som2, som2e_1_2 = P in surface som2;
# water in each soil layer, asmos_<layer>
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
_SITE_STATE_VARIABLE_FILES = {
'metabc_1_path': 'metabc_1.tif',
'metabc_2_path': 'metabc_2.tif',
'som1c_1_path': 'som1c_1.tif',
'som1c_2_path': 'som1c_2.tif',
'som2c_1_path': 'som2c_1.tif',
'som2c_2_path': 'som2c_2.tif',
'som3c_path': 'som3c.tif',
'strucc_1_path': 'strucc_1.tif',
'strucc_2_path': 'strucc_2.tif',
'strlig_1_path': 'strlig_1.tif',
'strlig_2_path': 'strlig_2.tif',
'metabe_1_1_path': 'metabe_1_1.tif',
'metabe_2_1_path': 'metabe_2_1.tif',
'som1e_1_1_path': 'som1e_1_1.tif',
'som1e_2_1_path': 'som1e_2_1.tif',
'som2e_1_1_path': 'som2e_1_1.tif',
'som2e_2_1_path': 'som2e_2_1.tif',
'som3e_1_path': 'som3e_1.tif',
'struce_1_1_path': 'struce_1_1.tif',
'struce_2_1_path': 'struce_2_1.tif',
'metabe_1_2_path': 'metabe_1_2.tif',
'metabe_2_2_path': 'metabe_2_2.tif',
'plabil_path': 'plabil.tif',
'secndy_2_path': 'secndy_2.tif',
'parent_2_path': 'parent_2.tif',
'occlud_path': 'occlud.tif',
'som1e_1_2_path': 'som1e_1_2.tif',
'som1e_2_2_path': 'som1e_2_2.tif',
'som2e_1_2_path': 'som2e_1_2.tif',
'som2e_2_2_path': 'som2e_2_2.tif',
'som3e_2_path': 'som3e_2.tif',
'struce_1_2_path': 'struce_1_2.tif',
'struce_2_2_path': 'struce_2_2.tif',
'asmos_1_path': 'asmos_1.tif',
'asmos_2_path': 'asmos_2.tif',
'asmos_3_path': 'asmos_3.tif',
'asmos_4_path': 'asmos_4.tif',
'asmos_5_path': 'asmos_5.tif',
'asmos_6_path': 'asmos_6.tif',
'asmos_7_path': 'asmos_7.tif',
'asmos_8_path': 'asmos_8.tif',
'asmos_9_path': 'asmos_9.tif',
'avh2o_3_path': 'avh2o_3.tif',
'minerl_1_1_path': 'minerl_1_1.tif',
'minerl_2_1_path': 'minerl_2_1.tif',
'minerl_3_1_path': 'minerl_3_1.tif',
'minerl_4_1_path': 'minerl_4_1.tif',
'minerl_5_1_path': 'minerl_5_1.tif',
'minerl_6_1_path': 'minerl_6_1.tif',
'minerl_7_1_path': 'minerl_7_1.tif',
'minerl_8_1_path': 'minerl_8_1.tif',
'minerl_9_1_path': 'minerl_9_1.tif',
'minerl_10_1_path': 'minerl_10_1.tif',
'minerl_1_2_path': 'minerl_1_2.tif',
'minerl_2_2_path': 'minerl_2_2.tif',
'minerl_3_2_path': 'minerl_3_2.tif',
'minerl_4_2_path': 'minerl_4_2.tif',
'minerl_5_2_path': 'minerl_5_2.tif',
'minerl_6_2_path': 'minerl_6_2.tif',
'minerl_7_2_path': 'minerl_7_2.tif',
'minerl_8_2_path': 'minerl_8_2.tif',
'minerl_9_2_path': 'minerl_9_2.tif',
'minerl_10_2_path': 'minerl_10_2.tif',
'snow_path': 'snow.tif',
'snlq_path': 'snlq.tif',
}
# _PFT_STATE_VARIABLES contains state variables that are a
# property of a PFT, including:
#     carbon, nitrogen, and phosphorus in aboveground biomass
#     where 1=N, 2=P
#     e.g. aglivc = C in aboveground live biomass,
#     aglive_1 = N in aboveground live biomass;
#     carbon, nitrogen, and phosphorus in aboveground standing dead
#     biomass, stdedc and stdede;
#     carbon, nitrogen and phosphorus in belowground live biomass,
#     bglivc and bglive
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
_PFT_STATE_VARIABLES = [
'aglivc', 'bglivc', 'stdedc', 'aglive_1', 'bglive_1',
'stdede_1', 'aglive_2', 'bglive_2', 'stdede_2', 'avh2o_1',
'crpstg_1', 'crpstg_2',
]
# intermediate parameters that do not change between timesteps,
# including field capacity and wilting point of each soil layer,
# coefficients describing effect of soil texture on decomposition
# rates
_PERSISTENT_PARAMS_FILES = {
'afiel_1_path': 'afiel_1.tif',
'afiel_2_path': 'afiel_2.tif',
'afiel_3_path': 'afiel_3.tif',
'afiel_4_path': 'afiel_4.tif',
'afiel_5_path': 'afiel_5.tif',
'afiel_6_path': 'afiel_6.tif',
'afiel_7_path': 'afiel_7.tif',
'afiel_8_path': 'afiel_8.tif',
'afiel_9_path': 'afiel_9.tif',
'awilt_1_path': 'awilt_1.tif',
'awilt_2_path': 'awilt_2.tif',
'awilt_3_path': 'awilt_3.tif',
'awilt_4_path': 'awilt_4.tif',
'awilt_5_path': 'awilt_5.tif',
'awilt_6_path': 'awilt_6.tif',
'awilt_7_path': 'awilt_7.tif',
'awilt_8_path': 'awilt_8.tif',
'awilt_9_path': 'awilt_9.tif',
'wc_path': 'wc.tif',
'eftext_path': 'eftext.tif',
'p1co2_2_path': 'p1co2_2.tif',
'fps1s3_path': 'fps1s3.tif',
'orglch_path': 'orglch.tif',
'fps2s3_path': 'fps2s3.tif',
'rnewas_1_1_path': 'rnewas_1_1.tif',
'rnewas_2_1_path': 'rnewas_2_1.tif',
'rnewas_1_2_path': 'rnewas_1_2.tif',
'rnewas_2_2_path': 'rnewas_2_2.tif',
'rnewbs_1_1_path': 'rnewbs_1_1.tif',
'rnewbs_1_2_path': 'rnewbs_1_2.tif',
'rnewbs_2_1_path': 'rnewbs_2_1.tif',
'rnewbs_2_2_path': 'rnewbs_2_2.tif',
'vlossg_path': 'vlossg.tif',
}
# site-level values that are updated once per year
_YEARLY_FILES = {
'annual_precip_path': 'annual_precip.tif',
'baseNdep_path': 'baseNdep.tif',
}
# pft-level values that are updated once per year
_YEARLY_PFT_FILES = ['pltlig_above', 'pltlig_below']
# intermediate values for each plant functional type that are shared
# between submodels, but do not need to be saved as output
_PFT_INTERMEDIATE_VALUES = [
'h2ogef_1', 'tgprod_pot_prod',
'cercrp_min_above_1', 'cercrp_min_above_2',
'cercrp_max_above_1', 'cercrp_max_above_2',
'cercrp_min_below_1', 'cercrp_min_below_2',
'cercrp_max_below_1', 'cercrp_max_below_2',
'tgprod', 'rtsh', 'flgrem', 'fdgrem']
# intermediate site-level values that are shared between submodels,
# but do not need to be saved as output
_SITE_INTERMEDIATE_VALUES = [
'amov_1', 'amov_2', 'amov_3', 'amov_4', 'amov_5', 'amov_6', 'amov_7',
'amov_8', 'amov_9', 'amov_10', 'snowmelt', 'bgwfunc', 'diet_sufficiency']
# fixed parameters for each grazing animal type are adapted from the GRAZPLAN
# model as described by Freer et al. 2012, "The GRAZPLAN animal biology model
# for sheep and cattle and the GrazFeed decision support tool"
_FREER_PARAM_DICT = {
'b_indicus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.31,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'b_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.36,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'indicus_x_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.335,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'sheep': {
'CN1': 0.0157,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.04,
'CI2': 1.7,
'CI8': 28,
'CI9': 1.4,
'CI12': 0.15,
'CI13': 0.02,
'CI14': 0.002,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00112,
'CR5': 0.6,
'CR6': 0.00112,
'CR7': 0,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.486,
'CL1': 2,
'CL2': 22,
'CL3': 1,
'CL5': 0.94,
'CL6': 4.7,
'CL15': 0.045,
'CM1': 0.09,
'CM2': 0.26,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.02,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CW1': 24,
'CW2': 0.004,
'CW3': 0.7,
'CW5': 0.25,
'CW6': 0.072,
'CW7': 1.35,
'CW8': 0.016,
'CW9': 1,
'CW12': 0.025,
'CP1': 150,
'CP4': 0.33,
'CP5': 1.43,
'CP6': 3.38,
'CP7': 0.91,
'CP8': 4.33,
'CP9': 4.37,
'CP10': 0.965,
'CP15': 0.1,
},
}
# _TARGET_NODATA is for general rasters that are positive, and _IC_NODATA is
# for rasters that can take values in any range
_TARGET_NODATA = -1.0
_IC_NODATA = float(numpy.finfo('float32').min)
# SV_NODATA is for state variables
_SV_NODATA = -1.0
def execute(args):
"""InVEST Forage Model.
[model description]
Parameters:
args['workspace_dir'] (string): path to target output workspace.
args['results_suffix'] (string): (optional) string to append to any
output file names
args['starting_month'] (int): what month to start reporting where
the range 1..12 is equivalent to Jan..Dec.
args['starting_year'] (int): what year to start runs. this value is
used to notate outputs in the form [month_int]_[year]
args['n_months'] (int): number of months to run model, the model run
will start reporting in `args['starting_month']`.
args['aoi_path'] (string): path to polygon vector indicating the
desired spatial extent of the model. This has the effect of
clipping the computational area of the input datasets to be the
area intersected by this polygon.
args['management_threshold'] (float): biomass in kg/ha required to be
left standing at each model step after offtake by grazing animals
args['proportion_legume_path'] (string): path to raster containing
fraction of pasture that is legume, by weight
args['bulk_density_path'] (string): path to bulk density raster.
args['ph_path'] (string): path to soil pH raster.
args['clay_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is clay
args['silt_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is silt
args['sand_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is sand
args['precip_dir'] (string): path to a directory containing monthly
precipitation rasters. The model requires at least 12 months of
precipitation and expects to find a precipitation file input for
every month of the simulation, so the number of precipitation
files should be the maximum of 12 and `n_months`. The file name of
each precipitation raster must end with the year, followed by an
underscore, followed by the month number. E.g., Precip_2016_1.tif
for January of 2016.
args['min_temp_dir'] (string): path to a directory containing monthly
minimum temperature rasters. The model requires one minimum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
minimum temperature raster must end with the month number. E.g.,
Min_temperature_1.tif for January.
args['max_temp_dir'] (string): path to a directory containing monthly
maximum temperature rasters. The model requires one maximum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
maximum temperature raster must end with the month number. E.g.,
Max_temperature_1.tif for January.
args['site_param_table'] (string): path to csv file giving site
parameters. This file must contain a column named "site" that
contains unique integers. These integer values correspond to site
type identifiers which are values in the site parameter spatial
index raster. Other required fields for this table are site and
"fixed" parameters from the Century model, i.e., the parameters
in the Century input files site.100 and fix.100.
args['site_param_spatial_index_path'] (string): path to a raster file
that indexes site parameters, indicating which set of site
parameter values should apply at each pixel in the raster. The
raster should be composed of integers that correspond to values in
the field "site" in `site_param_table`.
args['veg_trait_path'] (string): path to csv file giving vegetation
traits for each plant functional type available for grazing. This
file must contain a column named "PFT" that contains unique
integers. These integer values correspond to PFT identifiers of
veg spatial composition rasters. Other required fields for this
table are vegetation input parameters from the Century model, for
example maximum intrinsic growth rate, optimum temperature for
production, minimum C/N ratio, etc.
args['veg_spatial_composition_path_pattern'] (string): path to
vegetation rasters, one per plant functional type available for
grazing, where <PFT> can be replaced with an integer that is
indexed in the veg trait csv.
Example: if this value is given as `./vegetation/pft_<PFT>.tif`
and the directory `./vegetation/` contains these files:
"pft_1.tif"
"pft_12.tif"
"pft_50.tif",
then the "PFT" field in the vegetation trait table must contain
the values 1, 12, and 50.
args['animal_trait_path'] (string): path to csv file giving animal
traits for each animal type - number - duration combination. This
table must contain a column named "animal_id" that contains unique
integers. These integer values correspond to features in the
animal management layer.
Other required fields in this table are:
type (allowable values: b_indicus, b_taurus,
indicus_x_taurus, sheep, camelid, hindgut_fermenter)
sex (allowable values: entire_m, castrate, breeding_female,
NA)
age (days)
weight (kg)
SRW (standard reference weight, kg; the weight of a mature
female in median condition)
SFW (standard fleece weight, kg; the average weight of fleece
of a mature adult; for sheep only)
birth_weight (kg)
grz_months (a string of integers, separated by ','; months of
the simulation when animals are present,
relative to `starting_month`. For example, if `n_months`
is 3, and animals are present during the entire simulation
period, `grz_months` should be "1,2,3")
args['animal_grazing_areas_path'] (string): path to animal vector
inputs giving the location of grazing animals. Must have a field
named "animal_id", containing unique integers that correspond to
the values in the "animal_id" column of the animal trait csv, and
a field named "num_animal" giving the number of animals grazing
inside each polygon feature.
args['initial_conditions_dir'] (string): optional input, path to
directory containing initial conditions. If this directory is not
supplied, a site_initial_table and pft_initial_table must be
supplied. If supplied, this directory must contain a series of
rasters with initial values for each PFT and for the site.
Required rasters for each PFT:
initial variables that are a property of PFT in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
e.g., aglivc_<PFT>.tif
Required for the site:
initial variables that are a property of site in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['site_initial_table'] (string): optional input, path to table
containing initial conditions for each site state variable. If an
initial conditions directory is not supplied, this table must be
supplied. This table must contain a value for each site code and
each state variable listed in the following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['pft_initial_table'] (string): optional input, path to table
containing initial conditions for each plant functional type state
variable. If an initial conditions directory is not supplied, this
table must be supplied. This table must contain a value for each
plant functional type index and each state variable listed in the
following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['save_sv_rasters'] (boolean): optional input, default false.
Should rasters containing all state variables be saved for each
model time step?
args['animal_density'] (string): optional input, density of grazing
animals in animals per hectare.
args['crude_protein'] (float): optional input, crude protein
concentration of forage for the purposes of animal diet selection.
            Should be a value between 0 and 1. If included, this value is
substituted for N content of forage when calculating digestibility
and "ingestibility" of forage, and protein content of the diet, for
grazing animals.
Returns:
None.
"""
LOGGER.info("model execute: %s", args)
starting_month = int(args['starting_month'])
starting_year = int(args['starting_year'])
n_months = int(args['n_months'])
try:
delete_sv_folders = not args['save_sv_rasters']
except KeyError:
delete_sv_folders = True
try:
global CRUDE_PROTEIN
CRUDE_PROTEIN = args['crude_protein']
except KeyError:
pass
try:
animal_density_path = args['animal_density']
except KeyError:
args['animal_density'] = None
# this set will build up the integer months that are used so we can index
# them with temperature later
temperature_month_set = set()
# this dict will be used to build the set of input rasters associated with
# a reasonable lookup ID so we can have a nice dataset to align for raster
# stack operations
base_align_raster_path_id_map = {}
precip_dir_list = [
os.path.join(args['precip_dir'], f) for f in
os.listdir(args['precip_dir'])]
for month_index in range(n_months):
month_i = (starting_month + month_index - 1) % 12 + 1
temperature_month_set.add(month_i)
year = starting_year + (starting_month + month_index - 1) // 12
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
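        # e.g., for year 2016 and month 1 this matches names like "Precip_2016_1.tif"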
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No precipitation data found for year %d, month %d" %
(year, month_i))
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_{}'.format(month_index)] = file_list[0]
# the model requires 12 months of precipitation data to calculate
# atmospheric N deposition and potential production from annual precip
n_precip_months = int(args['n_months'])
if n_precip_months < 12:
m_index = int(args['n_months'])
while n_precip_months < 12:
month_i = (starting_month + m_index - 1) % 12 + 1
year = starting_year + (starting_month + m_index - 1) // 12
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
break
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_%d' % m_index] = file_list[0]
n_precip_months = n_precip_months + 1
m_index = m_index + 1
if n_precip_months < 12:
raise ValueError("At least 12 months of precipitation data required")
# collect monthly temperature data
min_temp_dir_list = [
os.path.join(args['min_temp_dir'], f) for f in
os.listdir(args['min_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
file_list = [
month_file_path for month_file_path in min_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No minimum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'min_temp_%d' % month_i] = file_list[0]
max_temp_dir_list = [
os.path.join(args['max_temp_dir'], f) for f in
os.listdir(args['max_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
file_list = [
month_file_path for month_file_path in max_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No maximum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'max_temp_%d' % month_i] = file_list[0]
# lookup to provide path to soil percent given soil type
for soil_type in SOIL_TYPE_LIST:
base_align_raster_path_id_map[soil_type] = (
args['%s_proportion_path' % soil_type])
if not os.path.exists(base_align_raster_path_id_map[soil_type]):
raise ValueError(
"Couldn't find %s for %s" % (
base_align_raster_path_id_map[soil_type], soil_type))
base_align_raster_path_id_map['bulk_d_path'] = args['bulk_density_path']
base_align_raster_path_id_map['ph_path'] = args['ph_path']
# make sure site initial conditions and parameters exist for each site
# identifier
base_align_raster_path_id_map['site_index'] = (
args['site_param_spatial_index_path'])
n_bands = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['n_bands']
if n_bands > 1:
raise ValueError(
'Site spatial index raster must contain only one band')
site_datatype = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['datatype']
if site_datatype not in [1, 2, 3, 4, 5]:
raise ValueError('Site spatial index raster must be integer type')
# get unique values in site param raster
site_index_set = set()
for offset_map, raster_block in pygeoprocessing.iterblocks(
(args['site_param_spatial_index_path'], 1)):
site_index_set.update(numpy.unique(raster_block))
site_nodata = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['nodata'][0]
if site_nodata in site_index_set:
site_index_set.remove(site_nodata)
site_param_table = utils.build_lookup_from_csv(
args['site_param_table'], 'site')
missing_site_index_list = list(
site_index_set.difference(site_param_table.keys()))
if missing_site_index_list:
raise ValueError(
"Couldn't find parameter values for the following site " +
"indices: %s\n\t" + ", ".join(missing_site_index_list))
# make sure plant functional type parameters exist for each pft raster
pft_dir = os.path.dirname(args['veg_spatial_composition_path_pattern'])
pft_basename = os.path.basename(
args['veg_spatial_composition_path_pattern'])
files = [
f for f in os.listdir(pft_dir) if os.path.isfile(
os.path.join(pft_dir, f))]
pft_regex = re.compile(pft_basename.replace('<PFT>', r'(\d+)'))
pft_matches = [
m for m in [pft_regex.search(f) for f in files] if m is not None]
pft_id_set = set([int(m.group(1)) for m in pft_matches])
for pft_i in pft_id_set:
pft_path = args['veg_spatial_composition_path_pattern'].replace(
'<PFT>', '%d' % pft_i)
base_align_raster_path_id_map['pft_%d' % pft_i] = pft_path
veg_trait_table = utils.build_lookup_from_csv(
args['veg_trait_path'], 'PFT')
missing_pft_trait_list = pft_id_set.difference(veg_trait_table.keys())
if missing_pft_trait_list:
raise ValueError(
"Couldn't find trait values for the following plant functional " +
"types: %s\n\t" + ", ".join(missing_pft_trait_list))
frtcindx_set = set([
pft_i['frtcindx'] for pft_i in veg_trait_table.values()])
if frtcindx_set.difference(set([0, 1])):
raise ValueError("frtcindx parameter contains invalid values")
base_align_raster_path_id_map['proportion_legume_path'] = args[
'proportion_legume_path']
# track separate state variable files for each PFT
pft_sv_dict = {}
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
pft_sv_dict['{}_{}_path'.format(
sv, pft_i)] = '{}_{}.tif'.format(sv, pft_i)
# make sure animal traits exist for each feature in animal management
# layer
anim_id_list = []
driver = ogr.GetDriverByName('ESRI Shapefile')
datasource = driver.Open(args['animal_grazing_areas_path'], 0)
layer = datasource.GetLayer()
for feature in layer:
anim_id_list.append(feature.GetField('animal_id'))
input_animal_trait_table = utils.build_lookup_from_csv(
args['animal_trait_path'], 'animal_id')
missing_animal_trait_list = set(
anim_id_list).difference(input_animal_trait_table.keys())
if missing_animal_trait_list:
raise ValueError(
"Couldn't find trait values for the following animal " +
"ids: %s\n\t" + ", ".join(missing_animal_trait_list))
# if animal density is supplied, align inputs to match its resolution
# otherwise, match resolution of precipitation rasters
if args['animal_density']:
target_pixel_size = pygeoprocessing.get_raster_info(
args['animal_density'])['pixel_size']
base_align_raster_path_id_map['animal_density'] = args[
'animal_density']
else:
target_pixel_size = pygeoprocessing.get_raster_info(
base_align_raster_path_id_map['precip_0'])['pixel_size']
LOGGER.info(
"pixel size of aligned inputs: %s", target_pixel_size)
# temporary directory for intermediate files
global PROCESSING_DIR
PROCESSING_DIR = os.path.join(args['workspace_dir'], "temporary_files")
if not os.path.exists(PROCESSING_DIR):
os.makedirs(PROCESSING_DIR)
# set up a dictionary that uses the same keys as
# 'base_align_raster_path_id_map' to point to the clipped/resampled
# rasters to be used in raster calculations for the model.
aligned_raster_dir = os.path.join(
args['workspace_dir'], 'aligned_inputs')
if os.path.exists(aligned_raster_dir):
shutil.rmtree(aligned_raster_dir)
os.makedirs(aligned_raster_dir)
aligned_inputs = dict([(key, os.path.join(
aligned_raster_dir, 'aligned_%s' % os.path.basename(path)))
for key, path in base_align_raster_path_id_map.items()])
# align all the base inputs to be the minimum known pixel size and to
# only extend over their combined intersections
source_input_path_list = [
base_align_raster_path_id_map[k] for k in sorted(
base_align_raster_path_id_map.keys())]
aligned_input_path_list = [
aligned_inputs[k] for k in sorted(aligned_inputs.keys())]
pygeoprocessing.align_and_resize_raster_stack(
source_input_path_list, aligned_input_path_list,
['near'] * len(source_input_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']],
vector_mask_options={'mask_vector_path': args['aoi_path']})
_check_pft_fractional_cover_sum(aligned_inputs, pft_id_set)
file_suffix = utils.make_suffix_string(args, 'results_suffix')
# create animal trait spatial index raster from management polygon
aligned_inputs['animal_index'] = os.path.join(
aligned_raster_dir, 'animal_spatial_index.tif')
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], aligned_inputs['animal_index'],
gdal.GDT_Int32, [_TARGET_NODATA], fill_value_list=[_TARGET_NODATA])
pygeoprocessing.rasterize(
args['animal_grazing_areas_path'], aligned_inputs['animal_index'],
option_list=["ATTRIBUTE=animal_id"])
# create uniform animal density raster, if not supplied as input
if not args['animal_density']:
aligned_inputs['animal_density'] = os.path.join(
aligned_raster_dir, 'animal_density.tif')
_animal_density(aligned_inputs, args['animal_grazing_areas_path'])
# Initialization
sv_dir = os.path.join(args['workspace_dir'], 'state_variables_m-1')
os.makedirs(sv_dir)
initial_conditions_dir = None
try:
initial_conditions_dir = args['initial_conditions_dir']
except KeyError:
pass
if initial_conditions_dir:
# check that a raster for each required state variable is supplied
missing_initial_values = []
# set _SV_NODATA from initial rasters
state_var_nodata = set([])
# align initial state variables to resampled inputs
resample_initial_path_map = {}
for sv in _SITE_STATE_VARIABLE_FILES:
sv_path = os.path.join(
initial_conditions_dir, _SITE_STATE_VARIABLE_FILES[sv])
state_var_nodata.update(
set([pygeoprocessing.get_raster_info(sv_path)['nodata'][0]]))
resample_initial_path_map[sv] = sv_path
if not os.path.exists(sv_path):
missing_initial_values.append(sv_path)
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
sv_key = '{}_{}_path'.format(sv, pft_i)
sv_path = os.path.join(
initial_conditions_dir, '{}_{}.tif'.format(sv, pft_i))
state_var_nodata.update(
set([pygeoprocessing.get_raster_info(sv_path)['nodata']
[0]]))
resample_initial_path_map[sv_key] = sv_path
if not os.path.exists(sv_path):
missing_initial_values.append(sv_path)
if missing_initial_values:
raise ValueError(
"Couldn't find the following required initial values: " +
"\n\t".join(missing_initial_values))
if len(state_var_nodata) > 1:
raise ValueError(
"Initial state variable rasters contain >1 nodata value")
global _SV_NODATA
_SV_NODATA = list(state_var_nodata)[0]
# align initial values with inputs
initial_path_list = (
[aligned_inputs['precip_0']] +
[resample_initial_path_map[key] for key in sorted(
resample_initial_path_map.keys())])
aligned_initial_path_list = (
[os.path.join(PROCESSING_DIR, 'aligned_input_template.tif')] +
[os.path.join(
sv_dir, os.path.basename(resample_initial_path_map[key])) for
key in sorted(resample_initial_path_map.keys())])
pygeoprocessing.align_and_resize_raster_stack(
initial_path_list, aligned_initial_path_list,
['near'] * len(initial_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']], raster_align_index=0,
vector_mask_options={'mask_vector_path': args['aoi_path']})
sv_reg = dict(
[(key, os.path.join(sv_dir, os.path.basename(path)))
for key, path in resample_initial_path_map.items()])
else:
# create initialization rasters from tables
try:
site_initial_conditions_table = utils.build_lookup_from_csv(
args['site_initial_table'], 'site')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
missing_site_index_list = list(
site_index_set.difference(site_initial_conditions_table.keys()))
if missing_site_index_list:
raise ValueError(
"Couldn't find initial conditions values for the following " +
"site indices: %s\n\t" + ", ".join(missing_site_index_list))
try:
pft_initial_conditions_table = utils.build_lookup_from_csv(
args['pft_initial_table'], 'PFT')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
missing_pft_index_list = pft_id_set.difference(
pft_initial_conditions_table.keys())
if missing_pft_index_list:
raise ValueError(
"Couldn't find initial condition values for the following "
"plant functional types: %s\n\t" + ", ".join(
missing_pft_index_list))
sv_reg = initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table)
# calculate persistent intermediate parameters that do not change during
# the simulation
persist_param_dir = os.path.join(
args['workspace_dir'], 'intermediate_parameters')
utils.make_directories([persist_param_dir])
pp_reg = utils.build_file_registry(
[(_PERSISTENT_PARAMS_FILES, persist_param_dir)], file_suffix)
# calculate derived animal traits that do not change during the simulation
freer_parameter_df = pandas.DataFrame.from_dict(
_FREER_PARAM_DICT, orient='index')
freer_parameter_df['type'] = freer_parameter_df.index
animal_trait_table = calc_derived_animal_traits(
input_animal_trait_table, freer_parameter_df)
# calculate maximum potential intake of each animal type
for animal_id in animal_trait_table.keys():
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# calculate field capacity and wilting point
LOGGER.info("Calculating field capacity and wilting point")
_afiel_awilt(
aligned_inputs['site_index'], site_param_table,
sv_reg['som1c_2_path'], sv_reg['som2c_2_path'], sv_reg['som3c_path'],
aligned_inputs['sand'], aligned_inputs['silt'],
aligned_inputs['clay'], aligned_inputs['bulk_d_path'], pp_reg)
# calculate other persistent parameters
LOGGER.info("Calculating persistent parameters")
_persistent_params(
aligned_inputs['site_index'], site_param_table,
aligned_inputs['sand'], aligned_inputs['clay'], pp_reg)
# calculate required ratios for decomposition of structural material
LOGGER.info("Calculating required ratios for structural decomposition")
_structural_ratios(
aligned_inputs['site_index'], site_param_table, sv_reg, pp_reg)
# make yearly directory for values that are updated every twelve months
year_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
year_reg = dict(
[(key, os.path.join(year_dir, path)) for key, path in
_YEARLY_FILES.items()])
for pft_i in pft_id_set:
for file in _YEARLY_PFT_FILES:
year_reg['{}_{}'.format(file, pft_i)] = os.path.join(
year_dir, '{}_{}.tif'.format(file, pft_i))
# make monthly directory for monthly intermediate parameters that are
# shared between submodels, but do not need to be saved as output
month_temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
month_reg = {}
for pft_i in pft_id_set:
for val in _PFT_INTERMEDIATE_VALUES:
month_reg['{}_{}'.format(
val, pft_i)] = os.path.join(
month_temp_dir, '{}_{}.tif'.format(val, pft_i))
for val in _SITE_INTERMEDIATE_VALUES:
month_reg[val] = os.path.join(month_temp_dir, '{}.tif'.format(val))
output_dir = os.path.join(args['workspace_dir'], "output")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# provisional state variable registry contains provisional biomass in
# absence of grazing
provisional_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
provisional_sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, provisional_sv_dir),
(pft_sv_dict, provisional_sv_dir)], file_suffix)
intermediate_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
# Main simulation loop
# for each step in the simulation
for month_index in range(n_months):
if (month_index % 12) == 0:
# Update yearly quantities
_yearly_tasks(
aligned_inputs, site_param_table, veg_trait_table, month_index,
pft_id_set, year_reg)
current_month = (starting_month + month_index - 1) % 12 + 1
current_year = starting_year + (starting_month + month_index - 1) // 12
# track state variables from previous step
prev_sv_reg = sv_reg
for animal_id in animal_trait_table.keys():
if animal_trait_table[animal_id]['sex'] == 'breeding_female':
revised_animal_trait_dict = update_breeding_female_status(
animal_trait_table[animal_id], month_index)
animal_trait_table[animal_id] = revised_animal_trait_dict
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# enforce absence of grazing as zero biomass removed
for pft_i in pft_id_set:
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['flgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['fdgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
# populate provisional_sv_reg with provisional biomass in absence of
# grazing
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg,
provisional_sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg,
provisional_sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
provisional_sv_reg)
intermediate_sv_reg = copy_intermediate_sv(
pft_id_set, provisional_sv_reg, intermediate_sv_dir)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, provisional_sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, provisional_sv_reg)
# estimate grazing offtake by animals relative to provisional biomass
# at an intermediate step, after senescence but before new growth
_calc_grazing_offtake(
aligned_inputs, args['aoi_path'], args['management_threshold'],
intermediate_sv_reg, pft_id_set, aligned_inputs['animal_index'],
animal_trait_table, veg_trait_table, current_month, month_reg)
# estimate actual biomass production for this step, integrating impacts
# of grazing
sv_dir = os.path.join(
args['workspace_dir'], 'state_variables_m%d' % month_index)
utils.make_directories([sv_dir])
sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, sv_dir),
(pft_sv_dict, sv_dir)], file_suffix)
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg, sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg, sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
sv_reg)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, sv_reg)
_animal_diet_sufficiency(
sv_reg, pft_id_set, aligned_inputs, animal_trait_table,
veg_trait_table, current_month, month_reg)
_grazing(
aligned_inputs, site_param_table, month_reg, animal_trait_table,
pft_id_set, sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, sv_reg)
_leach(aligned_inputs, site_param_table, month_reg, sv_reg)
_write_monthly_outputs(
aligned_inputs, provisional_sv_reg, sv_reg, month_reg, pft_id_set,
current_year, current_month, output_dir, file_suffix)
# summary results
summary_output_dir = os.path.join(output_dir, 'summary_results')
os.makedirs(summary_output_dir)
summary_shp_path = os.path.join(
summary_output_dir,
'grazing_areas_results_rpm{}.shp'.format(file_suffix))
create_vector_copy(
args['animal_grazing_areas_path'], summary_shp_path)
field_pickle_map, field_header_order_list = aggregate_and_pickle_results(
output_dir, summary_shp_path)
_add_fields_to_shapefile(
field_pickle_map, field_header_order_list, summary_shp_path)
# clean up
shutil.rmtree(persist_param_dir)
shutil.rmtree(PROCESSING_DIR)
if delete_sv_folders:
for month_index in range(-1, n_months):
shutil.rmtree(
os.path.join(
args['workspace_dir'],
'state_variables_m%d' % month_index))
def raster_multiplication(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Multiply raster1 by raster2.
Multiply raster1 by raster2 element-wise. In any pixel where raster1 or
raster2 is nodata, the result is nodata. The result is always of float
datatype.
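    Parameters:
        raster1 (string): path to one raster operand
        raster1_nodata (float or int): nodata value in raster1
        raster2 (string): path to second raster operand
        raster2_nodata (float or int): nodata value in raster2
        target_path (string): path to location to store the product
        target_path_nodata (float or int): nodata value for the result raster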
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_multiply_op(raster1, raster2):
"""Multiply two rasters."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
result[valid_mask] = raster1[valid_mask] * raster2[valid_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_multiply_op, target_path, gdal.GDT_Float32,
target_path_nodata)
def raster_division(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Divide raster1 by raster2.
Divide raster1 by raster2 element-wise. In any pixel where raster1 or
raster2 is nodata, the result is nodata. The result is always of float
datatype.
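    Parameters:
        raster1 (string): path to the dividend raster
        raster1_nodata (float or int): nodata value in raster1
        raster2 (string): path to the divisor raster
        raster2_nodata (float or int): nodata value in raster2
        target_path (string): path to location to store the quotient
        target_path_nodata (float or int): nodata value for the result raster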
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_divide_op(raster1, raster2):
"""Divide raster1 by raster2."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
raster1 = raster1.astype(numpy.float32)
raster2 = raster2.astype(numpy.float32)
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
error_mask = ((raster1 != 0) & (raster2 == 0.) & valid_mask)
zero_mask = ((raster1 == 0.) & (raster2 == 0.) & valid_mask)
nonzero_mask = ((raster2 != 0.) & valid_mask)
result[error_mask] = target_path_nodata
result[zero_mask] = 0.
result[nonzero_mask] = raster1[nonzero_mask] / raster2[nonzero_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_divide_op, target_path, gdal.GDT_Float32,
target_path_nodata)
def raster_list_sum(
raster_list, input_nodata, target_path, target_nodata,
nodata_remove=False):
"""Calculate the sum per pixel across rasters in a list.
Sum the rasters in `raster_list` element-wise, allowing nodata values
in the rasters to propagate to the result or treating nodata as zero. If
nodata is treated as zero, areas where all inputs are nodata will be nodata
in the output.
Parameters:
raster_list (list): list of paths to rasters to sum
input_nodata (float or int): nodata value in the input rasters
target_path (string): path to location to store the result
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any input
raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(*raster_list):
"""Add the rasters in raster_list without removing nodata values."""
invalid_mask = numpy.any(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
def raster_sum_op_nodata_remove(*raster_list):
"""Add the rasters in raster_list, treating nodata as zero."""
invalid_mask = numpy.all(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op_nodata_remove,
target_path, gdal.GDT_Float32, target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op,
target_path, gdal.GDT_Float32, target_nodata)
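# Example usage (sketch; paths are hypothetical):
#   raster_list_sum(
#       ['precip_1.tif', 'precip_2.tif', 'precip_3.tif'], _TARGET_NODATA,
#       'seasonal_precip.tif', _TARGET_NODATA, nodata_remove=True)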
def raster_sum(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Add raster 1 and raster2.
Add raster1 and raster2, allowing nodata values in the rasters to
propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to one raster operand
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to second raster operand
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the sum
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(raster1, raster2):
"""Add raster1 and raster2 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] + raster2[valid_mask]
return result
def raster_sum_op_nodata_remove(raster1, raster2):
"""Add raster1 and raster2, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 + raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op, target_path, gdal.GDT_Float32,
target_nodata)
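# Example usage (sketch; paths are hypothetical):
#   raster_sum(
#       'aglivc_1.tif', _SV_NODATA, 'stdedc_1.tif', _SV_NODATA,
#       'standing_biomass_1.tif', _TARGET_NODATA, nodata_remove=True)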
def raster_difference(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Subtract raster2 from raster1.
Subtract raster2 from raster1 element-wise, allowing nodata values in the
rasters to propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to raster from which to subtract raster2
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to raster which should be subtracted from
raster1
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the difference
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the difference in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_difference_op(raster1, raster2):
"""Subtract raster2 from raster1 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] - raster2[valid_mask]
return result
def raster_difference_op_nodata_remove(raster1, raster2):
"""Subtract raster2 from raster1, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 - raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op, target_path, gdal.GDT_Float32,
target_nodata)
def reclassify_nodata(target_path, new_nodata_value):
"""Reclassify the nodata value of a raster to a new value.
Convert all areas of nodata in the target raster to the new nodata
value, which must be an integer.
Parameters:
target_path (string): path to target raster
new_nodata_value (integer): new value to set as nodata
Side effects:
modifies the raster indicated by `target_path`
Returns:
None
"""
def reclassify_op(target_raster):
reclassified_raster = numpy.copy(target_raster)
reclassify_mask = (target_raster == previous_nodata_value)
reclassified_raster[reclassify_mask] = new_nodata_value
return reclassified_raster
fd, temp_path = tempfile.mkstemp(dir=PROCESSING_DIR)
shutil.copyfile(target_path, temp_path)
previous_nodata_value = pygeoprocessing.get_raster_info(
target_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(temp_path, 1)], reclassify_op, target_path, gdal.GDT_Float32,
new_nodata_value)
# clean up
os.close(fd)
os.remove(temp_path)
def weighted_state_variable_sum(
sv, sv_reg, aligned_inputs, pft_id_set, weighted_sum_path):
"""Calculate weighted sum of state variable across plant functional types.
To sum a state variable across PFTs within a grid cell, the state variable
must be weighted by the fractional cover of each PFT inside the grid cell.
First multiply the state variable by its fractional cover, and then add up
the weighted products.
Parameters:
sv (string): state variable to be summed across plant functional types
sv_reg (dict): map of key, path pairs giving paths to state variables,
including sv, the state variable to be summed
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
weighted_sum_path (string): path to raster that should contain the
weighted sum across PFTs
Side effects:
modifies or creates the raster indicated by `weighted_sum_path`
Returns:
None
"""
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for pft_i in pft_id_set:
val = '{}_weighted'.format(sv)
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
weighted_path_list = []
for pft_i in pft_id_set:
target_path = temp_val_dict['{}_weighted_{}'.format(sv, pft_i)]
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_multiplication(
sv_reg['{}_{}_path'.format(sv, pft_i)], _SV_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
target_path, _TARGET_NODATA)
weighted_path_list.append(target_path)
raster_list_sum(
weighted_path_list, _TARGET_NODATA, weighted_sum_path, _TARGET_NODATA,
nodata_remove=True)
# clean up temporary files
shutil.rmtree(temp_dir)
def _check_pft_fractional_cover_sum(aligned_inputs, pft_id_set):
"""Check the sum of fractional cover across plant functional types.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
Raises:
ValueError if the pixel-wise sum of fractional cover values across
plant functional types exceeds 1
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='cover_sum', dir=PROCESSING_DIR) as cover_sum_temp_file:
cover_sum_path = cover_sum_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
# initialize sum to zero
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], cover_sum_path, gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
for pft_i in pft_id_set:
shutil.copyfile(cover_sum_path, operand_temp_path)
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_sum(
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
operand_temp_path, _TARGET_NODATA,
cover_sum_path, _TARGET_NODATA)
# get maximum sum of fractional cover
max_cover = 0.
for offset_map, raster_block in pygeoprocessing.iterblocks(
(cover_sum_path, 1)):
valid_mask = (raster_block != _TARGET_NODATA)
if raster_block[valid_mask].size > 0:
max_cover = max(max_cover, numpy.amax(raster_block[valid_mask]))
if max_cover > 1:
raise ValueError(
"Fractional cover across plant functional types exceeds 1")
# clean up
os.remove(cover_sum_path)
def initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table):
"""Generate initial state variable registry from initial conditions tables.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including site spatial index raster and
fractional cover of each plant functional type
sv_dir (string): path to directory where initial state variable rasters
should be stored
pft_id_set (set): set of integers identifying plant functional types
site_initial_conditions_table (dict): map of site spatial index to
dictionaries that contain initial values for site-level state
variables
pft_initial_conditions_table (dict): map of plant functional type index
to dictionaries that contain initial values for plant functional
type-level state variables
Returns:
initial_sv_reg, map of key, path pairs giving paths to initial state
variable rasters
"""
def full_masked(pft_cover, fill_val):
"""Create a constant raster masked by pft fractional cover.
Parameters:
pft_cover (numpy.ndarray): input, fractional cover of the plant
functional type
fill_val (float): constant value with which to fill raster in areas
where fractional cover > 0
Returns:
full_masked, a raster containing `fill_val` in areas where
`pft_cover` > 0
"""
valid_mask = (
(~numpy.isclose(pft_cover, _SV_NODATA)) &
(pft_cover > 0))
full_masked = numpy.empty(pft_cover.shape, dtype=numpy.float32)
full_masked[:] = _SV_NODATA
full_masked[valid_mask] = fill_val
return full_masked
initial_sv_reg = {}
# site-level state variables
# check for missing state variable values
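    # keys of _SITE_STATE_VARIABLE_FILES end in '_path'; [:-5] strips that
    # suffix to recover the state variable name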
required_site_state_var = set(
[sv_key[:-5] for sv_key in _SITE_STATE_VARIABLE_FILES.keys()])
for site_code in site_initial_conditions_table.keys():
missing_site_state_var = required_site_state_var.difference(
site_initial_conditions_table[site_code].keys())
if missing_site_state_var:
raise ValueError(
"The following state variables were not found in the site " +
"initial conditions table: \n\t" + "\n\t".join(
missing_site_state_var))
for sv_key, basename in _SITE_STATE_VARIABLE_FILES.items():
state_var = sv_key[:-5]
site_to_val = dict(
[(site_code, float(table[state_var])) for (
site_code, table) in
site_initial_conditions_table.items()])
target_path = os.path.join(sv_dir, basename)
initial_sv_reg[sv_key] = target_path
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _SV_NODATA)
# PFT-level state variables
for pft_i in pft_id_set:
# check for missing values
missing_pft_state_var = set(_PFT_STATE_VARIABLES).difference(
pft_initial_conditions_table[pft_i].keys())
if missing_pft_state_var:
raise ValueError(
"The following state variables were not found in the plant " +
"functional type initial conditions table: \n\t" + "\n\t".join(
missing_pft_state_var))
for state_var in _PFT_STATE_VARIABLES:
fill_val = pft_initial_conditions_table[pft_i][state_var]
pft_cover_path = aligned_inputs['pft_{}'.format(pft_i)]
target_path = os.path.join(
sv_dir, '{}_{}.tif'.format(state_var, pft_i))
sv_key = '{}_{}_path'.format(state_var, pft_i)
initial_sv_reg[sv_key] = target_path
pygeoprocessing.raster_calculator(
[(pft_cover_path, 1), (fill_val, 'raw')],
full_masked, target_path, gdal.GDT_Float32, _SV_NODATA)
return initial_sv_reg
def _calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulkd_path, edepth_path,
ompc_path):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
som1c_2_path (string): path to active organic soil carbon raster
som2c_2_path (string): path to slow organic soil carbon raster
som3c_path (string): path to passive organic soil carbon raster
bulkd_path (string): path to bulk density of soil raster
        edepth_path (string): path to depth of soil raster
ompc_path (string): path to result, total soil organic matter
Side effects:
modifies or creates the raster indicated by `ompc_path`
Returns:
None
"""
def ompc_op(som1c_2, som2c_2, som3c, bulkd, edepth):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
            som1c_2 (numpy.ndarray): state variable, active organic soil
                carbon
            som2c_2 (numpy.ndarray): state variable, slow organic soil carbon
            som3c (numpy.ndarray): state variable, passive organic soil carbon
            bulkd (numpy.ndarray): input, bulk density of soil
            edepth (numpy.ndarray): parameter, depth of soil for this
                calculation
Returns:
ompc, total soil organic matter weighted by bulk
density.
"""
ompc = numpy.empty(som1c_2.shape, dtype=numpy.float32)
ompc[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som2c_2, _SV_NODATA)) &
(~numpy.isclose(som3c, _SV_NODATA)) &
(~numpy.isclose(bulkd, bulkd_nodata)) &
(edepth != _IC_NODATA))
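        # sum soil C across the three pools, convert carbon to organic matter
        # using the conventional 1.724 factor (organic matter assumed ~58% C),
        # and normalize by soil mass per unit area (10000. * bulkd * edepth)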
ompc[valid_mask] = (
(som1c_2[valid_mask] + som2c_2[valid_mask] +
som3c[valid_mask]) * 1.724 /
(10000. * bulkd[valid_mask] * edepth[valid_mask]))
return ompc
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
som1c_2_path, som2c_2_path, som3c_path,
bulkd_path, edepth_path]],
ompc_op, ompc_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, afiel_path):
"""Calculate field capacity for one soil layer.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
afiel_path (string): path to result raster, field capacity for this
soil layer
Side effects:
creates the raster indicated by `afiel_path`
Returns:
None
"""
def afiel_op(sand, silt, clay, ompc, bulkd):
"""Calculate field capacity for one soil layer.
Field capacity, maximum soil moisture retention capacity,
        from Gupta and Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
            sand (numpy.ndarray): input, proportion sand in soil
            silt (numpy.ndarray): input, proportion silt in soil
            clay (numpy.ndarray): input, proportion clay in soil
            ompc (numpy.ndarray): derived, estimated total soil organic matter
            bulkd (numpy.ndarray): input, bulk density of soil
Returns:
afiel, field capacity for this soil layer
"""
afiel = numpy.empty(sand.shape, dtype=numpy.float32)
afiel[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
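        # multiple linear regression of Gupta and Larson (1979) for moisture
        # retention at -0.33 bar (field capacity)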
afiel[valid_mask] = (
0.3075 * sand[valid_mask] + 0.5886 * silt[valid_mask] +
0.8039 * clay[valid_mask] + 2.208E-03 * ompc[valid_mask] +
-0.1434 * bulkd[valid_mask])
return afiel
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
afiel_op, afiel_path, gdal.GDT_Float32, _TARGET_NODATA)
def _calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, awilt_path):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
awilt_path (string): path to result raster, wilting point for this
soil layer
Side effects:
creates the raster indicated by `awilt_path`
Returns:
None
"""
def awilt_op(sand, silt, clay, ompc, bulkd):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
            sand (numpy.ndarray): input, proportion sand in soil
            silt (numpy.ndarray): input, proportion silt in soil
            clay (numpy.ndarray): input, proportion clay in soil
            ompc (numpy.ndarray): derived, estimated total soil organic matter
            bulkd (numpy.ndarray): input, bulk density of soil
Returns:
awilt, wilting point for this soil layer
"""
awilt = numpy.empty(sand.shape, dtype=numpy.float32)
awilt[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
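        # multiple linear regression of Gupta and Larson (1979) for moisture
        # retention at -15 bar (wilting point)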
awilt[valid_mask] = (
-0.0059 * sand[valid_mask] + 0.1142 * silt[valid_mask] +
0.5766 * clay[valid_mask] + 2.228E-03 * ompc[valid_mask] +
0.02671 * bulkd[valid_mask])
return awilt
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
awilt_op, awilt_path, gdal.GDT_Float32, _TARGET_NODATA)
def _afiel_awilt(
site_index_path, site_param_table, som1c_2_path, som2c_2_path,
som3c_path, sand_path, silt_path, clay_path, bulk_d_path, pp_reg):
"""Calculate field capacity and wilting point for each soil layer.
Computations based on Gupta and Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution, organic
matter percent and bulk density'. Water Resources Research 15:1633.
Field capacity is calculated for -0.33 bar; wilting point is
calculated for water content at -15 bars.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters including 'edepth' field
som1c_2_path (string): path to the state variable 'som1c_2',
active organic soil carbon
som2c_2_path (string): path to the state variable 'som2c_2',
slow organic soil carbon
som3c_path (string): path to the state variable 'som3c',
passive organic soil carbon
sand_path (string): path to raster containing proportion sand in soil
silt_path (string): path to raster containing proportion silt in soil
clay_path (string): path to raster containing proportion clay in soil
bulk_d_path (string): path to raster containing bulk density of soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation
Modifies the rasters pp_reg['afiel_<layer>'] and pp_reg['awilt_<layer>']
for all soil layers.
Returns:
None
"""
def decrement_ompc(ompc_orig_path, ompc_dec_path):
"""Decrease estimated organic matter to 85% of its value.
In each subsequent soil layer, estimated organic matter is decreased
by 15%, to 85% of its previous value.
Parameters:
ompc_orig_path (string): path to estimated soil organic matter
raster
ompc_dec_path (string): path to result raster, estimated soil
organic matter decreased to 85% of its previous value
Side effects:
modifies or creates the raster indicated by `ompc_dec_path`
Returns:
None
"""
def decrement_op(ompc_orig):
"""Reduce organic matter to 85% of its previous value."""
ompc_dec = numpy.empty(ompc_orig.shape, dtype=numpy.float32)
ompc_dec[:] = _TARGET_NODATA
valid_mask = (ompc_orig != _TARGET_NODATA)
ompc_dec[valid_mask] = ompc_orig[valid_mask] * 0.85
return ompc_dec
pygeoprocessing.raster_calculator(
[(ompc_orig_path, 1)], decrement_op, ompc_dec_path,
gdal.GDT_Float32, _TARGET_NODATA)
# temporary intermediate rasters for calculating field capacity and
# wilting point
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
edepth_path = os.path.join(temp_dir, 'edepth.tif')
ompc_path = os.path.join(temp_dir, 'ompc.tif')
site_to_edepth = dict(
[(site_code, float(table['edepth'])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_edepth, edepth_path, gdal.GDT_Float32,
_IC_NODATA)
# estimate total soil organic matter
_calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulk_d_path, edepth_path,
ompc_path)
    # calculate field capacity and wilting point for each soil layer,
    # decreasing organic matter content to 85% of its value in the layer
    # above
for lyr in range(1, 10):
afiel_path = pp_reg['afiel_{}_path'.format(lyr)]
awilt_path = pp_reg['awilt_{}_path'.format(lyr)]
_calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
afiel_path)
_calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
awilt_path)
ompc_dec_path = os.path.join(temp_dir, 'ompc{}.tif'.format(lyr))
decrement_ompc(ompc_path, ompc_dec_path)
ompc_path = ompc_dec_path
# clean up temporary files
shutil.rmtree(temp_dir)
def _persistent_params(
site_index_path, site_param_table, sand_path, clay_path, pp_reg):
"""Calculate persistent parameters.
The calculated values do not change over the course of the simulation.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
sand_path (string): path to raster containing proportion sand in soil
clay_path (string): path to raster containing proportion clay in soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation.
Modifies the persistent parameter rasters indexed by the following
keys:
pp_reg['wc_path']
pp_reg['eftext_path']
pp_reg['p1co2_2_path']
pp_reg['fps1s3_path']
pp_reg['fps2s3_path']
pp_reg['orglch_path']
pp_reg['vlossg_path']
Returns:
None
"""
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
# temporary intermediate rasters for persistent parameters calculation
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
    for val in [
'peftxa', 'peftxb', 'p1co2a_2', 'p1co2b_2', 'ps1s3_1',
'ps1s3_2', 'ps2s3_1', 'ps2s3_2', 'omlech_1', 'omlech_2', 'vlossg']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for (
site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path, gdal.GDT_Float32,
_IC_NODATA)
def calc_wc(afiel_1, awilt_1):
"""Calculate water content of soil layer 1."""
return afiel_1 - awilt_1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pp_reg['afiel_1_path'], pp_reg['awilt_1_path']]],
calc_wc, pp_reg['wc_path'], gdal.GDT_Float32, _TARGET_NODATA)
def calc_eftext(peftxa, peftxb, sand):
"""Calculate effect of soil texture on microbial decomposition.
Use an empirical regression to estimate the effect of soil
sand content on the microbe decomposition rate. Line 359 Prelim.f
Parameters:
peftxa (numpy.ndarray): parameter, regression intercept
peftxb (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
eftext, coefficient that modifies microbe decomposition rate.
"""
eftext = numpy.empty(sand.shape, dtype=numpy.float32)
eftext[:] = _IC_NODATA
valid_mask = (
(peftxa != _IC_NODATA) &
(peftxb != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
eftext[valid_mask] = (
peftxa[valid_mask] + (peftxb[valid_mask] * sand[valid_mask]))
return eftext
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['peftxa'], param_val_dict['peftxb'], sand_path]],
calc_eftext, pp_reg['eftext_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_p1co2_2(p1co2a_2, p1co2b_2, sand):
"""Calculate the fraction of carbon lost to CO2 from som1c_2.
During decomposition from active organic soil carbon, a fraction
of decomposing material is lost to CO2 as the soil respires.
Line 366 Prelim.f
Parameters:
p1co2a_2 (numpy.ndarray): parameter, intercept of regression
predicting loss to CO2 from active organic soil carbon
p1co2b_2 (numpy.ndarray): parameter, slope of regression
predicting loss to CO2 from active organic soil carbon
sand (numpy.ndarray): input, proportion sand in soil
Returns:
p1co2_2, fraction of carbon that flows to CO2 from active
organic soil carbon
"""
p1co2_2 = numpy.empty(sand.shape, dtype=numpy.float32)
p1co2_2[:] = _IC_NODATA
valid_mask = (
(p1co2a_2 != _IC_NODATA) &
(p1co2b_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
p1co2_2[valid_mask] = (
p1co2a_2[valid_mask] + (p1co2b_2[valid_mask] * sand[valid_mask]))
return p1co2_2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['p1co2a_2'],
param_val_dict['p1co2b_2'], sand_path]],
calc_p1co2_2, pp_reg['p1co2_2_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps1s3(ps1s3_1, ps1s3_2, clay):
"""Calculate effect of clay content on decomposition from som1c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from soil organic matter with fast turnover to
soil organic matter with slow turnover. Line 370 Prelim.f
Parameters:
ps1s3_1 (numpy.ndarray): parameter, regression intercept
ps1s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps1s3, coefficient that modifies rate of decomposition
from som1c_2
"""
fps1s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps1s3[:] = _IC_NODATA
valid_mask = (
(ps1s3_1 != _IC_NODATA) &
(ps1s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps1s3[valid_mask] = (
ps1s3_1[valid_mask] + (ps1s3_2[valid_mask] * clay[valid_mask]))
return fps1s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps1s3_1'], param_val_dict['ps1s3_2'], clay_path]],
calc_fps1s3, pp_reg['fps1s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps2s3(ps2s3_1, ps2s3_2, clay):
"""Calculate effect of clay content on decomposition from som2c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from slow soil organic carbon to soil passive organic
carbon. Line 371 Prelim.f
Parameters:
ps2s3_1 (numpy.ndarray): parameter, regression intercept
ps2s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps2s3, coefficient that modifies rate of decomposition from
som2c_2 to som3c
"""
fps2s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps2s3[:] = _IC_NODATA
valid_mask = (
(ps2s3_1 != _IC_NODATA) &
(ps2s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps2s3[valid_mask] = (
ps2s3_1[valid_mask] + (ps2s3_2[valid_mask] * clay[valid_mask]))
return fps2s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps2s3_1'], param_val_dict['ps2s3_2'], clay_path]],
calc_fps2s3, pp_reg['fps2s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_orglch(omlech_1, omlech_2, sand):
"""Calculate the effect of sand content on leaching from soil.
Use an empirical regression to estimate the effect of sand content
of soil on rate of organic leaching from soil when there is drainage
of soil water from soil layer 1 to soil layer 2. Line 110 Predec.f
Parameters:
omlech_1 (numpy.ndarray): parameter, regression intercept
omlech_2 (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
orglch, the fraction of organic compounds leaching from soil
with drainage from soil layer 1 to layer 2
"""
orglch = numpy.empty(sand.shape, dtype=numpy.float32)
orglch[:] = _IC_NODATA
valid_mask = (
(omlech_1 != _IC_NODATA) &
(omlech_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
orglch[valid_mask] = (
omlech_1[valid_mask] + (omlech_2[valid_mask] * sand[valid_mask]))
return orglch
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['omlech_1'], param_val_dict['omlech_2'],
sand_path]],
calc_orglch, pp_reg['orglch_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_vlossg(vlossg_param, clay):
"""Calculate proportion of gross mineralized N that is volatized.
        During decomposition, some N is lost to volatilization. The volatized
        amount is a fraction of gross mineralized N given by this multiplier,
        which varies with soil clay content.
Parameters:
            vlossg_param (numpy.ndarray): parameter, volatilization loss
                multiplier
clay (numpy.ndarray): input, proportion clay in soil
Returns:
vlossg, proportion of gross mineralized N that is volatized
"""
valid_mask = (
(vlossg_param != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
vlossg = numpy.empty(vlossg_param.shape, dtype=numpy.float32)
vlossg[:] = _IC_NODATA
max_mask = ((clay > 0.3) & valid_mask)
min_mask = ((clay < 0.1) & valid_mask)
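        # the base fraction declines linearly from 0.03 at clay content 0.1
        # to 0.01 at clay content 0.3, is clamped outside that range, and is
        # then scaled by the site parameter vlossg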
vlossg[valid_mask] = -0.1 * (clay[valid_mask] - 0.3) + 0.01
vlossg[max_mask] = 0.01
vlossg[min_mask] = 0.03
vlossg[valid_mask] = vlossg[valid_mask] * vlossg_param[valid_mask]
return vlossg
pygeoprocessing.raster_calculator(
[(path, 1) for path in [param_val_dict['vlossg'], clay_path]],
calc_vlossg, pp_reg['vlossg_path'], gdal.GDT_Float32, _IC_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _aboveground_ratio(anps, tca, pcemic_1, pcemic_2, pcemic_3):
"""Calculate C/<iel> ratios of decomposing aboveground material.
This ratio is used to test whether there is sufficient <iel> (N or P)
in aboveground material for the material to decompose. Agdrat.f
Parameters:
anps (numpy.ndarray): state variable, N or P in the donor material
tca (numpy.ndarray): state variable, total C in the donor material
pcemic_1 (numpy.ndarray): parameter, maximum C/<iel> of new material
pcemic_2 (numpy.ndarray): parameter, minimum C/<iel> of new material
pcemic_3 (numpy.ndarray): parameter, minimum <iel> content of
decomposing material that gives minimum C/<iel> of new material
Returns:
agdrat, the C/<iel> ratio of new material
"""
valid_mask = (
(~numpy.isclose(anps, _SV_NODATA)) &
(~numpy.isclose(tca, _SV_NODATA)) &
(pcemic_1 != _IC_NODATA) &
(pcemic_2 != _IC_NODATA) &
(pcemic_3 != _IC_NODATA))
cemicb = numpy.empty(anps.shape, dtype=numpy.float32)
cemicb[:] = _IC_NODATA
cemicb[valid_mask] = (
(pcemic_2[valid_mask] - pcemic_1[valid_mask]) /
pcemic_3[valid_mask])
econt = numpy.empty(anps.shape, dtype=numpy.float32)
econt[:] = _TARGET_NODATA
econt[valid_mask] = 0
decompose_mask = ((tca > 0.) & valid_mask)
econt[decompose_mask] = anps[decompose_mask] / (tca[decompose_mask] * 2.5)
agdrat = numpy.empty(anps.shape, dtype=numpy.float32)
agdrat[:] = _TARGET_NODATA
agdrat[valid_mask] = pcemic_2[valid_mask]
compute_mask = ((econt <= pcemic_3) & valid_mask)
agdrat[compute_mask] = (
pcemic_1[compute_mask] + econt[compute_mask] * cemicb[compute_mask])
return agdrat
def _belowground_ratio(aminrl, varat_1_iel, varat_2_iel, varat_3_iel):
"""Calculate C/<iel> ratios of decomposing belowground material.
This ratio is used to test whether there is sufficient <iel> (N or P)
    in soil metabolic material for the material to decompose. Bgdrat.f
Parameters:
aminrl (numpy.ndarray): derived, average surface mineral <iel>
varat_1_iel (numpy.ndarray): parameter, maximum C/<iel> ratio for
newly decomposed material
varat_2_iel (numpy.ndarray): parameter, minimum C/<iel> ratio
varat_3_iel (numpy.ndarray): parameter, amount of <iel> present
when minimum ratio applies
Returns:
bgdrat, the C/<iel> ratio of new material
"""
valid_mask = (
(~numpy.isclose(aminrl, _SV_NODATA)) &
(varat_1_iel != _IC_NODATA) &
(varat_2_iel != _IC_NODATA) &
(varat_3_iel != _IC_NODATA))
bgdrat = numpy.empty(aminrl.shape, dtype=numpy.float32)
bgdrat[:] = _TARGET_NODATA
bgdrat[valid_mask] = (
(1. - aminrl[valid_mask] / varat_3_iel[valid_mask]) *
(varat_1_iel[valid_mask] - varat_2_iel[valid_mask]) +
varat_2_iel[valid_mask])
max_mask = ((aminrl <= 0) & valid_mask)
bgdrat[max_mask] = varat_1_iel[max_mask]
min_mask = ((aminrl > varat_3_iel) & valid_mask)
bgdrat[min_mask] = varat_2_iel[min_mask]
return bgdrat
def _structural_ratios(site_index_path, site_param_table, sv_reg, pp_reg):
"""Calculate maximum C/N and C/P ratios for structural material.
These ratios limit decomposition of structural material (i.e., material
containing lignin). Lines 31-77 Predec.f
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation.
Modifies the persistent parameter rasters indexed by the following
keys:
pp_reg['rnewas_1_1_path']
pp_reg['rnewas_1_2_path']
pp_reg['rnewas_2_1_path']
pp_reg['rnewas_2_2_path']
pp_reg['rnewbs_1_1_path']
pp_reg['rnewbs_1_2_path']
pp_reg['rnewbs_2_1_path']
pp_reg['rnewbs_2_2_path']
Returns:
None
"""
# temporary parameter rasters for structural ratios calculations
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for iel in [1, 2]:
        for val in [
'pcemic1_2', 'pcemic1_1', 'pcemic1_3', 'pcemic2_2',
'pcemic2_1', 'pcemic2_3', 'rad1p_1', 'rad1p_2',
'rad1p_3', 'varat1_1', 'varat22_1']:
target_path = os.path.join(temp_dir, '{}_{}.tif'.format(val, iel))
param_val_dict['{}_{}'.format(val, iel)] = target_path
site_to_val = dict(
[(site_code, float(table['{}_{}'.format(val, iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
def calc_rnewas_som2(
pcemic2_2, pcemic2_1, pcemic2_3, struce_1, strucc_1, rad1p_1,
rad1p_2, rad1p_3, pcemic1_2, rnewas1):
"""Calculate C/<iel> ratio for decomposition into som2.
This ratio is calculated separately for each nutrient (i.e., N, P).
When material decomposes into the surface slow organic pool, the
        C/<iel> ratio of decomposing material must be smaller than or equal to
        this ratio. An addition term derived from the C/<iel> ratio of
        material entering som1, the surface active pool, is also included in
        the ratio calculated here.
Parameters:
pcemic2_2 (numpy.ndarray): parameter, minimum C/<iel> ratio for
surface slow organic pool
pcemic2_1 (numpy.ndarray): parameter, maximum C/<iel> ratio for
surface slow organic pool
            pcemic2_3 (numpy.ndarray): parameter, minimum <iel> content of
decomposing aboveground material, above which the C/<iel>
ratio of the surface slow organic pool equals pcemic1_2
struce_1 (numpy.ndarray): state variable, <iel> in surface
structural material
strucc_1 (numpy.ndarray): state variable, C in surface
structural material
rad1p_1 (numpy.ndarray): parameter, intercept of regression used
to calculate addition of <iel> from surface active pool
rad1p_2 (numpy.ndarray): parameter, slope of regression used
to calculate addition of <iel> from surface active pool
rad1p_3 (numpy.ndarray): parameter, minimum allowable C/<iel>
used to calculate addition term for C/<iel> ratio of som2
formed from surface active pool
pcemic1_2 (numpy.ndarray): parameter, minimum C/<iel> ratio for
surface active organic pool
rnewas1 (numpy.ndarray): derived, C/<iel> ratio for decomposition
into som1
Returns:
rnewas2, required ratio for decomposition of structural material
into som2 for one nutrient
"""
valid_mask = (
(pcemic2_2 != _IC_NODATA) &
(pcemic2_1 != _IC_NODATA) &
(pcemic2_3 != _IC_NODATA) &
(~numpy.isclose(struce_1, _SV_NODATA)) &
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(rad1p_1 != _IC_NODATA) &
(rad1p_2 != _IC_NODATA) &
(rad1p_3 != _IC_NODATA) &
(pcemic1_2 != _IC_NODATA) &
(rnewas1 != _TARGET_NODATA))
rnewas2 = _aboveground_ratio(
struce_1, strucc_1, pcemic2_1, pcemic2_2, pcemic2_3)
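        # radds1 is an addition to the som1 ratio contributed by material
        # entering the surface active pool; the som2 ratio is the som1 ratio
        # plus radds1, floored at rad1p_3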
radds1 = numpy.empty(strucc_1.shape, dtype=numpy.float32)
radds1[:] = _TARGET_NODATA
radds1[valid_mask] = (
rad1p_1[valid_mask] + rad1p_2[valid_mask] *
(rnewas1[valid_mask] - pcemic1_2[valid_mask]))
rnewas2[valid_mask] = rnewas1[valid_mask] + radds1[valid_mask]
rnewas2[valid_mask] = numpy.maximum(
rnewas2[valid_mask], rad1p_3[valid_mask])
return rnewas2
for iel in [1, 2]:
# calculate rnewas_iel_1 - aboveground material to SOM1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['struce_1_{}_path'.format(iel)],
sv_reg['strucc_1_path'],
param_val_dict['pcemic1_1_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)],
param_val_dict['pcemic1_3_{}'.format(iel)]]],
_aboveground_ratio, pp_reg['rnewas_{}_1_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewas_iel_2 - aboveground material to SOM2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['pcemic2_2_{}'.format(iel)],
param_val_dict['pcemic2_1_{}'.format(iel)],
param_val_dict['pcemic2_3_{}'.format(iel)],
sv_reg['struce_1_{}_path'.format(iel)],
sv_reg['strucc_1_path'],
param_val_dict['rad1p_1_{}'.format(iel)],
param_val_dict['rad1p_2_{}'.format(iel)],
param_val_dict['rad1p_3_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)],
pp_reg['rnewas_{}_1_path'.format(iel)]]],
calc_rnewas_som2, pp_reg['rnewas_{}_2_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewbs_iel_1 - belowground material to SOM1
site_to_varat1_1 = dict([
(site_code, float(table['varat1_1_{}'.format(iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_varat1_1,
pp_reg['rnewbs_{}_1_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewbs_iel_2 - belowground material to SOM2
# rnewbs(iel,2) = varat22(1,iel)
site_to_varat22_1 = dict([
(site_code, float(table['varat22_1_{}'.format(iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_varat22_1,
pp_reg['rnewbs_{}_2_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _yearly_tasks(
aligned_inputs, site_param_table, veg_trait_table, month_index,
pft_id_set, year_reg):
"""Calculate quantities that remain static for 12 months.
    These quantities are annual precipitation, annual atmospheric N
    deposition, and the fraction of plant residue that is lignin for each pft.
    Century also calculates non-symbiotic soil N fixation once yearly, but
    here that calculation has been moved to the monthly tasks. Century uses
    precipitation for the 12 future months (prcgrw) to predict root:shoot
    ratios; here, if data for 12 future months are not available, we instead
    use the sum of monthly precipitation over 12 months including the current
    one.
Lines 79-82, 164 Eachyr.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including monthly precipitation and site
spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
month_index (int): current monthly step, relative to 0 so that
month_index=0 at first monthly time step
pft_id_set (set): set of integers identifying plant functional types
year_reg (dict): map of key, path pairs giving paths to the annual
precipitation and N deposition rasters
Side effects:
modifies or creates the rasters indicated by:
year_reg['annual_precip_path']
year_reg['baseNdep_path']
year_reg['pltlig_above_<pft>'] for each pft
year_reg['pltlig_below_<pft>'] for each pft
Returns:
None
Raises:
ValueError if fewer than 12 monthly precipitation rasters can be found
"""
def calc_base_N_dep(epnfa_1, epnfa_2, prcann):
"""Calculate base annual atmospheric N deposition.
Parameters:
epnfa_1 (numpy.ndarray): parameter, intercept of regression
predicting atmospheric N deposition from precipitation
epnfa_2 (numpy.ndarray): parameter, slope of regression predicting
atmospheric N deposition from precipitation
prcann (numpy.ndarray): derived, annual precipitation
Returns:
baseNdep, annual atmospheric N deposition
"""
baseNdep = numpy.empty(prcann.shape, dtype=numpy.float32)
baseNdep[:] = 0.
valid_mask = (
(epnfa_1 != _IC_NODATA) &
(epnfa_2 != _IC_NODATA) &
(prcann != _TARGET_NODATA))
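        # deposition is a linear function of annual precipitation, with
        # precipitation capped at 80 and negative deposition set to zero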
baseNdep[valid_mask] = (
epnfa_1[valid_mask] +
(epnfa_2[valid_mask] * numpy.minimum(prcann[valid_mask], 80.)))
baseNdep[baseNdep < 0] = 0.
return baseNdep
def calc_pltlig(fligni_1_lyr, fligni_2_lyr, prcann):
"""Calculate the fraction of residue that is lignin. Cmplig.f
This fraction is used to calculate the fraction of residue (i.e.,
incoming litter from fall of standing dead or incoming soil from death
of roots) that is partitioned to metabolic vs structural pools. It is
calculated once per year from annual precipitation and fixed
parameters.
Parameters:
fligni_1_lyr (numpy.ndarray): parameter, intercept for regression
predicting lignin content fraction from rainfall
fligni_2_lyr (numpy.ndarray): parameter, slope for regression
predicting lignin content fraction from rainfall
prcann (numpy.ndarray): derived, annual precipitation
Returns:
pltlig_lyr, fraction of residue that is lignin
"""
valid_mask = (
(fligni_1_lyr != _IC_NODATA) &
(fligni_2_lyr != _IC_NODATA) &
(prcann != _TARGET_NODATA))
pltlig = numpy.empty(fligni_1_lyr.shape, dtype=numpy.float32)
pltlig[:] = _TARGET_NODATA
pltlig[valid_mask] = (
fligni_1_lyr[valid_mask] + fligni_2_lyr[valid_mask] *
prcann[valid_mask])
pltlig[valid_mask] = numpy.clip(pltlig[valid_mask], 0.02, 0.5)
return pltlig
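    # gather 12 monthly precipitation rasters, searching up to 11 months
    # before and after the current time step; raise an error if 12 rasters
    # cannot be found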
offset = -12
annual_precip_rasters = []
while len(annual_precip_rasters) < 12:
offset += 1
if offset == 12:
raise ValueError("Insufficient precipitation rasters were found")
precip_month = month_index + offset
try:
annual_precip_rasters.append(
aligned_inputs['precip_%d' % precip_month])
except KeyError:
continue
precip_nodata = set([])
for precip_raster in annual_precip_rasters:
precip_nodata.update(
set([pygeoprocessing.get_raster_info(precip_raster)['nodata'][0]]))
if len(precip_nodata) > 1:
raise ValueError("Precipitation rasters include >1 nodata value")
precip_nodata = list(precip_nodata)[0]
raster_list_sum(
annual_precip_rasters, precip_nodata, year_reg['annual_precip_path'],
_TARGET_NODATA)
# intermediate parameter rasters for this operation
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
    for val in ['epnfa_1', 'epnfa_2']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for val in ['fligni_1_1', 'fligni_2_1', 'fligni_1_2', 'fligni_2_2']:
for pft_i in pft_id_set:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
# calculate base N deposition
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['epnfa_1'], param_val_dict['epnfa_2'],
year_reg['annual_precip_path']]],
calc_base_N_dep, year_reg['baseNdep_path'], gdal.GDT_Float32,
_TARGET_NODATA)
for pft_i in pft_id_set:
# fraction of surface residue that is lignin
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['fligni_1_1_{}'.format(pft_i)],
param_val_dict['fligni_2_1_{}'.format(pft_i)],
year_reg['annual_precip_path']]],
calc_pltlig, year_reg['pltlig_above_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# fraction of soil residue that is lignin
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['fligni_1_2_{}'.format(pft_i)],
param_val_dict['fligni_2_2_{}'.format(pft_i)],
year_reg['annual_precip_path']]],
calc_pltlig, year_reg['pltlig_below_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def calc_latitude(template_raster, latitude_raster_path):
"""Calculate latitude at the center of each pixel in a template raster."""
pygeoprocessing.new_raster_from_base(
template_raster, latitude_raster_path, gdal.GDT_Float32,
[_IC_NODATA])
latitude_raster = gdal.OpenEx(
latitude_raster_path, gdal.OF_RASTER | gdal.GA_Update)
target_band = latitude_raster.GetRasterBand(1)
base_raster_info = pygeoprocessing.get_raster_info(template_raster)
geotransform = base_raster_info['geotransform']
for offset_map, raster_block in pygeoprocessing.iterblocks(
(template_raster, 1)):
n_y_block = raster_block.shape[0]
n_x_block = raster_block.shape[1]
# offset by .5 so we're in the center of the pixel
xoff = offset_map['xoff'] + 0.5
yoff = offset_map['yoff'] + 0.5
# calculate the projected x and y coordinate bounds for the block
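        # (assumes a north-up geotransform; rotation terms are ignored)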
x_range = numpy.linspace(
geotransform[0] + geotransform[1] * xoff,
geotransform[0] + geotransform[1] * (xoff + n_x_block - 1),
n_x_block)
y_range = numpy.linspace(
geotransform[3] + geotransform[5] * yoff,
geotransform[3] + geotransform[5] * (yoff + n_y_block - 1),
n_y_block)
# we'll use this to avoid generating any nodata points
valid_mask = raster_block != base_raster_info['nodata']
# these indexes correspond to projected coordinates
# y_vector is what we want, an array of latitude coordinates
x_vector, y_vector = numpy.meshgrid(x_range, y_range)
target_band.WriteArray(
y_vector, xoff=offset_map['xoff'], yoff=offset_map['yoff'])
# Making sure the band and dataset is flushed and not in memory
target_band.FlushCache()
target_band.FlushCache()
target_band = None
gdal.Dataset.__swig_destroy__(latitude_raster)
latitude_raster = None
def _calc_daylength(template_raster, month, daylength_path):
"""Calculate estimated hours of daylength. Daylen.c.
Parameters:
template_raster (string): path to a raster in geographic coordinates
that is aligned with model inputs
        month (int): current month of the year, such that month=1 indicates
            January
        daylength_path (string): path to result raster, estimated hours of
            daylength
Side effects:
modifies or creates the raster indicated by `daylength_path`
Returns:
None
"""
def daylength(month):
def _daylength(latitude):
"""Estimate hours of daylength for a given month and latitude."""
# Julian day at beginning of each month
jday_list = [
1, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 337]
jday = jday_list[month - 1]
# Convert latitude from degrees to radians
rlatitude = latitude * (numpy.pi / 180.0)
declin = 0.4014 * numpy.sin(6.283185 * (jday - 77.0) / 365)
temp = 1.0 - (-numpy.tan(rlatitude) * numpy.tan(declin))**2
temp[temp < 0] = 0
par1 = numpy.sqrt(temp)
par2 = -numpy.tan(rlatitude) * numpy.tan(declin)
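            # ahou is the hour angle at sunset (radians from solar noon);
            # day length is the fraction of a full rotation spent above the
            # horizon, (ahou / pi) * 24 hours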
ahou = numpy.arctan2(par1, par2)
hours_of_daylength = (ahou / numpy.pi) * 24
return hours_of_daylength
return _daylength
# calculate an intermediate input, latitude at each pixel center
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
latitude_raster_path = os.path.join(temp_dir, 'latitude.tif')
calc_latitude(template_raster, latitude_raster_path)
pygeoprocessing.raster_calculator(
[(latitude_raster_path, 1)], daylength(month), daylength_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _shortwave_radiation(template_raster, month, shwave_path):
"""Calculate shortwave radiation outside the atmosphere.
Shortwave radiation outside the atmosphere is calculated according to
Penman (1948), "Natural evaporation from open water, bare soil and grass",
Proc. Roy. Soc. London. The latitude of each pixel is required to
calculate radiation and is calculated as an intermediate step from the
input `template_raster`. shwave.f
Parameters:
template_raster (string): path to a raster in geographic coordinates
that is aligned with model inputs
        month (int): current month of the year, such that month=1 indicates
            January
shwave_path (string): path to shortwave radiation raster
Side effects:
Modifies the raster indicated by `shwave_path`
Returns:
None
"""
def shwave(month):
def _shwave(latitude):
"""Calculate shortwave radiation outside the atmosphere.
Parameters:
latitude (float): latitude of current site in degrees
month (int): current month of the year, such that month=1
indicates January
Returns:
shwave, short wave solar radiation outside the atmosphere
"""
# Julian date in middle of each month of the year
jday_list = [
16, 46, 75, 106, 136, 167, 197, 228, 259, 289, 320, 350]
jday = jday_list[month - 1]
transcof = 0.8
# Convert latitude from degrees to radians
rlatitude = latitude * (numpy.pi / 180.0)
# short wave solar radiation on a clear day
declin = 0.401426 * numpy.sin(6.283185 * (jday - 77.0) / 365.0)
temp = 1.0 - (-numpy.tan(rlatitude) * numpy.tan(declin))**2
temp[temp < 0.] = 0.
par1 = numpy.sqrt(temp)
par2 = (-numpy.tan(rlatitude) * numpy.tan(declin))
ahou = numpy.arctan2(par1, par2)
ahou[ahou < 0.] = 0.
solrad = (
917.0 * transcof * (
ahou * numpy.sin(rlatitude) * numpy.sin(declin) +
numpy.cos(rlatitude) *
numpy.cos(declin) * numpy.sin(ahou)))
# short wave radiation outside the atmosphere
shwave = solrad / transcof
return shwave
return _shwave
# calculate an intermediate input, latitude at each pixel center
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
latitude_raster_path = os.path.join(temp_dir, 'latitude.tif')
calc_latitude(template_raster, latitude_raster_path)
pygeoprocessing.raster_calculator(
[(latitude_raster_path, 1)],
shwave(month), shwave_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _reference_evapotranspiration(
max_temp_path, min_temp_path, shwave_path, fwloss_4_path,
pevap_path):
"""Calculate reference evapotranspiration.
Reference evapotranspiration from the FAO Penman-Monteith equation in
"Guidelines for computing crop water requirements", FAO Irrigation and
drainage paper 56 (http://www.fao.org/docrep/X0490E/x0490e08.htm),
modified by the parameter fwloss(4).
Parameters:
max_temp_path (string): path to maximum monthly temperature
min_temp_path (string): path to minimum monthly temperature
shwave_path (string): path to shortwave radiation outside the
atmosphere
fwloss_4_path (string): path to parameter, scaling factor for
reference evapotranspiration
pevap_path (string): path to result, reference evapotranspiration
raster
Side effects:
modifies or creates the raster indicated by `pevap_path`
Returns:
None
"""
def _calc_pevap(max_temp, min_temp, shwave, fwloss_4):
"""Calculate reference evapotranspiration.
Pevap.f
Parameters:
max_temp (numpy.ndarray): input, maximum monthly temperature
min_temp (numpy.ndarray): input, minimum monthly temperature
shwave (numpy.ndarray): derived, shortwave radiation outside the
atmosphere
fwloss_4 (numpy.ndarray): parameter, scaling factor for reference
evapotranspiration
Returns:
pevap, reference evapotranspiration
"""
const1 = 0.0023
const2 = 17.8
langleys2watts = 54.0
valid_mask = (
(~numpy.isclose(max_temp, maxtmp_nodata)) &
(~numpy.isclose(min_temp, mintmp_nodata)) &
(shwave != _TARGET_NODATA) &
(fwloss_4 != _IC_NODATA))
trange = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
trange[:] = _TARGET_NODATA
trange[valid_mask] = max_temp[valid_mask] - min_temp[valid_mask]
tmean = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
tmean[:] = _IC_NODATA
tmean[valid_mask] = (max_temp[valid_mask] + min_temp[valid_mask]) / 2.0
# daily reference evapotranspiration
daypet = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
daypet[:] = _TARGET_NODATA
        in1 = const1 * (tmean[valid_mask] + const2)
        in2 = numpy.sqrt(trange[valid_mask])
        in3 = (shwave[valid_mask] / langleys2watts)
        daypet[valid_mask] = in1 * in2 * in3
# monthly reference evapotranspiration, from mm to cm,
# bounded to be at least 0.5
monpet = (daypet * 30.) / 10.
monpet[monpet <= 0.5] = 0.5
pevap = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
pevap[:] = _TARGET_NODATA
pevap[valid_mask] = monpet[valid_mask] * fwloss_4[valid_mask]
return pevap
maxtmp_nodata = pygeoprocessing.get_raster_info(
max_temp_path)['nodata'][0]
mintmp_nodata = pygeoprocessing.get_raster_info(
min_temp_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
max_temp_path, min_temp_path, shwave_path, fwloss_4_path]],
_calc_pevap, pevap_path, gdal.GDT_Float32, _TARGET_NODATA)
def _potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg):
"""Calculate above- and belowground potential production.
Potential production of each plant functional type is calculated
as total potential production given incoming solar radiation,
limited by temperature, soil moisture, and obstruction by biomass and
litter. Further modification of potential production according to
limitation by water and nutrient availability is calculated in the
root:shoot ratio submodel. Lines 57-148 Potcrp.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including precipitation, temperature,
plant functional type composition, and site spatial index
site_param_table (dict): map of site spatial indices to dictionaries
containing site parameters
current_month (int): month of the year, such that current_month=1
indicates January
month_index (int): month of the simulation, such that month_index=13
indicates month 13 of the simulation
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
Side effects:
creates the raster indicated by `month_reg['h2ogef_1_<PFT>']` for each
plant functional type (PFT) where growth is scheduled to occur in
this month
creates the raster indicated by `month_reg['tgprod_pot_prod_<PFT>']`
for each plant functional type (PFT) where growth is scheduled to
occur in this month
Returns:
None
"""
    # if growth does not occur this month for any PFT,
    # skip the rest of the function
do_PFT = []
for pft_i in pft_id_set:
if str(current_month) in veg_trait_table[pft_i]['growth_months']:
do_PFT.append(pft_i)
if not do_PFT:
return
def calc_ctemp(aglivc, pmxbio, maxtmp, pmxtmp, mintmp, pmntmp):
"""Calculate soil temperature relative to its effect on growth.
Soil temperature is calculated from monthly temperature inputs and
modified by total standing live biomass. Lines 69-84 Potcrp.f
Parameters:
aglivc (numpy.ndarray): derived, sum of aglivc (carbon in
aboveground live biomass) across plant functional types
pmxbio (numpy.ndarray): parameter, maximum biomass impact on
temperature
maxtmp (numpy.ndarray): input, average maximum monthly
temperature
pmxtmp (numpy.ndarray): parameter, scaling factor for effect of
biomass on monthly maximum temperature
mintmp (numpy.ndarray): input, average minimum monthly temperature
pmntmp (numpy.ndarray): parameter, scaling factor for effect of
biomass on monthly minimum temperature
Returns:
ctemp, effect of soil temperature on potential production
"""
bio = numpy.empty(aglivc.shape, dtype=numpy.float32)
bio[:] = _IC_NODATA
valid_mask = (
(aglivc >= 0.) &
(pmxbio != _IC_NODATA) &
(~numpy.isclose(maxtmp, maxtmp_nodata)) &
(pmxtmp != _IC_NODATA) &
(~numpy.isclose(mintmp, mintmp_nodata)) &
(pmntmp != _IC_NODATA))
bio[valid_mask] = aglivc[valid_mask] * 2.5
bio[bio > pmxbio] = pmxbio[bio > pmxbio]
bio[pmxbio < 0] = _IC_NODATA
# Maximum temperature
tmxs = numpy.empty(aglivc.shape, dtype=numpy.float32)
tmxs[:] = _IC_NODATA
tmxs[valid_mask] = (
maxtmp[valid_mask] + (
(25.4/(1. + 18. * numpy.exp(-0.20 * maxtmp[valid_mask]))) *
(numpy.exp(pmxtmp[valid_mask] * bio[valid_mask]) - 0.13)))
# Minimum temperature
tmns = numpy.empty(aglivc.shape, dtype=numpy.float32)
tmns[:] = _IC_NODATA
tmns[valid_mask] = (
mintmp[valid_mask] +
(pmntmp[valid_mask] * bio[valid_mask] - 1.78))
# Average temperature
ctemp = numpy.empty(aglivc.shape, dtype=numpy.float32)
ctemp[:] = _IC_NODATA
ctemp[valid_mask] = (tmxs[valid_mask] + tmns[valid_mask])/2.
return ctemp
def calc_potprd(mintmp, maxtmp, ctemp, ppdf_1, ppdf_2, ppdf_3, ppdf_4):
"""Calculate the limiting effect of temperature on growth.
Estimated soil temperature restricts potential production according to
a Poisson Density Function curve described by the plant functional
        type-specific parameters ppdf_1-4. Lines 73-84 Potcrp.f
Parameters:
mintmp (numpy.ndarray): input, average minimum monthly temperature
maxtmp (numpy.ndarray): input, average maximum monthly
temperature
ctemp (numpy.ndarray): derived, soil temperature as calculated from
monthly temperature and modified by standing live biomass
ppdf_1 (numpy.ndarray): parameter, optimum temperature for growth
ppdf_2 (numpy.ndarray): parameter, maximum temperature for growth
ppdf_3 (numpy.ndarray): parameter, left curve shape for Poisson
Density Function curve describing growth as function of
temperature
ppdf_4 (numpy.ndarray): parameter, right curve shape for Poisson
Density Function curve describing growth as function of
temperature
Returns:
potprd, scaling factor describing potential production limited
by temperature
"""
valid_mask = (
(~numpy.isclose(mintmp, mintmp_nodata)) &
(~numpy.isclose(maxtmp, maxtmp_nodata)) &
(ctemp != _IC_NODATA) &
(ppdf_1 != _IC_NODATA) &
(ppdf_2 != _IC_NODATA) &
(ppdf_3 != _IC_NODATA) &
(ppdf_4 != _IC_NODATA))
frac = numpy.empty(ctemp.shape, dtype=numpy.float32)
frac[:] = _TARGET_NODATA
frac[valid_mask] = (
(ppdf_2[valid_mask] - ctemp[valid_mask]) /
(ppdf_2[valid_mask] - ppdf_1[valid_mask]))
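        # frac scales soil temperature between the optimum and maximum
        # temperatures for growth: frac = 1 at the optimum (ppdf_1) and
        # frac = 0 at the maximum (ppdf_2)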
avg_tmp = numpy.empty(ctemp.shape, dtype=numpy.float32)
avg_tmp[valid_mask] = (mintmp[valid_mask] + maxtmp[valid_mask]) / 2.
grow_mask = (
(avg_tmp > 0) &
(frac > 0) &
valid_mask)
potprd = numpy.empty(ctemp.shape, dtype=numpy.float32)
potprd[:] = _TARGET_NODATA
potprd[valid_mask] = 0.
potprd[grow_mask] = (numpy.exp(
(ppdf_3[grow_mask]/ppdf_4[grow_mask]) *
(1. - numpy.power(frac[grow_mask], ppdf_4[grow_mask]))) *
numpy.power(frac[grow_mask], ppdf_3[grow_mask]))
return potprd
def calc_h2ogef_1(
pevap, avh2o_1, precip, wc, pprpts_1, pprpts_2, pprpts_3):
"""Calculate the limiting factor of water availability on growth.
Soil moisture restricts potential production according to the ratio
of available water to reference evapotranspiration. The shape of the
linear relationship of this ratio to potential production is
controlled by the site parameters pprpts_1, pprpts_2, and pprpts_3.
Lines 57-64 Potcrp.f
Parameters:
pevap (numpy.ndarray): derived, reference evapotranspiration
avh2o_1 (numpy.ndarray): state variable, water available to this
plant functional type for growth
precip (numpy.ndarray): input, precipitation for the current month
wc (numpy.ndarray): derived, water content in soil layer 1
pprpts_1 (numpy.ndarray): parameter, the minimum ratio of
available water to reference evapotranspiration that limits
production completely
pprpts_2 (numpy.ndarray): parameter, influences the slope of the
line predicting potential production from available water
pprpts_3 (numpy.ndarray): parameter, the ratio of available water
to reference evapotranspiration above which production is
not restricted
Returns:
h2ogef_1, scaling factor describing potential production limited
by soil moisture
"""
valid_mask = (
(pevap != _TARGET_NODATA) &
(~numpy.isclose(avh2o_1, _SV_NODATA)) &
(~numpy.isclose(precip, precip_nodata)) &
(wc != _TARGET_NODATA) &
(pprpts_1 != _IC_NODATA) &
(pprpts_2 != _IC_NODATA) &
(pprpts_3 != _IC_NODATA))
h2ogef_prior = numpy.empty(pevap.shape, dtype=numpy.float32)
h2ogef_prior[:] = _TARGET_NODATA
h2ogef_prior[valid_mask] = numpy.where(
pevap[valid_mask] >= 0.01,
(avh2o_1[valid_mask] + precip[valid_mask])/pevap[valid_mask],
0.01)
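        # map the ratio of available moisture to reference ET onto a linear
        # scaling factor; the intercept and slope of the line depend on the
        # pprpts parameters and soil water content, and the result is clipped
        # to [0.01, 1]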
intcpt = (
pprpts_1[valid_mask] + (pprpts_2[valid_mask] * wc[valid_mask]))
slope = 1. / (pprpts_3[valid_mask] - intcpt)
h2ogef_1 = numpy.empty(pevap.shape, dtype=numpy.float32)
h2ogef_1[:] = _TARGET_NODATA
h2ogef_1[valid_mask] = (
1.0 + slope *
(h2ogef_prior[valid_mask] - pprpts_3[valid_mask]))
h2ogef_1[valid_mask] = numpy.clip(h2ogef_1[valid_mask], 0.01, 1.)
return h2ogef_1
def calc_biof(sum_stdedc, sum_aglivc, strucc_1, pmxbio, biok5):
"""Calculate the effect of obstruction on growth.
Live biomass, standing dead biomass, and litter reduce potential
production through obstruction. The shape of the relationship between
standing biomass and litter and potential production is controlled by
the site parameter pmxbio and the plant functional type parameter
biok5. Lines 91-120 Potcrp.f
Parameters:
sum_stdedc (numpy.ndarray): derived, total carbon in standing dead
biomass across plant functional types
sum_aglivc (numpy.ndarray): derived, total carbon in aboveground
live biomass across plant functional types
strucc_1 (numpy.ndarray): derived, carbon in surface litter
pmxbio (numpy.ndarray): parameter, maximum biomass impact on
potential production
            biok5 (numpy.ndarray): parameter, level of standing dead biomass
                and litter at which potential production is reduced by half
Returns:
biof, scaling factor describing potential production limited
by obstruction
"""
valid_mask = (
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(pmxbio != _IC_NODATA) &
(biok5 != _IC_NODATA))
bioc = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
bioc[:] = _IC_NODATA
bioc[valid_mask] = numpy.where(
((sum_stdedc[valid_mask] + 0.1*strucc_1[valid_mask]) <= 0.), 0.01,
(sum_stdedc[valid_mask] + 0.1*strucc_1[valid_mask]))
bioc[valid_mask] = numpy.where(
(bioc[valid_mask] > pmxbio[valid_mask]), pmxbio[valid_mask],
bioc[valid_mask])
bioprd = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
bioprd[:] = _IC_NODATA
bioprd[valid_mask] = 1. - (
bioc[valid_mask] / (biok5[valid_mask] + bioc[valid_mask]))
temp1 = 1. - bioprd
temp2 = temp1 * 0.75
temp3 = temp1 * 0.25
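        # obstruction eases as the ratio of live biomass to standing dead and
        # litter (ratlc) increases; biof rises from bioprd toward 1 and
        # saturates once ratlc exceeds 2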
ratlc = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
ratlc[:] = _IC_NODATA
ratlc[valid_mask] = sum_aglivc[valid_mask] / bioc[valid_mask]
biof = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
biof[:] = _TARGET_NODATA
biof[valid_mask] = numpy.where(
ratlc[valid_mask] <= 1.,
(bioprd[valid_mask] + (temp2[valid_mask] * ratlc[valid_mask])),
numpy.where(
ratlc[valid_mask] <= 2.,
(bioprd[valid_mask] + temp2[valid_mask]) +
temp3[valid_mask] * (ratlc[valid_mask] - 1.),
1.))
return biof
def calc_tgprod_pot_prod(prdx_1, shwave, potprd, h2ogef_1, biof):
"""Calculate total potential production.
Total above- and belowground potential biomass production is calculated
as the total potential production given solar radiation and the
        intrinsic growth capacity of the plant functional type, modified by
limiting factors of temperature, soil moisture, and obstruction by
standing biomass and litter. Line 147 Potcrp.f
Parameters:
prdx_1 (numpy.ndarray): parameter, the intrinsic capacity of the
plant functional type for growth per unit of solar radiation
shwave (numpy.ndarray): derived, shortwave solar radiation outside
the atmosphere
potprd (numpy.ndarray): parameter, scaling factor describing
limiting effect of temperature
h2ogef_1 (numpy.ndarray): derived, scaling factor describing the
limiting effect of soil moisture
biof (numpy.ndarray): derived, scaling factor describing the
limiting effect of obstruction by standing biomass and litter
Returns:
tgprod_pot_prod, total above- and belowground potential biomass
production (g biomass)
"""
valid_mask = (
(prdx_1 != _IC_NODATA) &
(shwave != _TARGET_NODATA) &
(potprd != _TARGET_NODATA) &
(h2ogef_1 != _TARGET_NODATA) &
(biof != _TARGET_NODATA))
tgprod_pot_prod = numpy.empty(prdx_1.shape, dtype=numpy.float32)
tgprod_pot_prod[:] = _TARGET_NODATA
tgprod_pot_prod[valid_mask] = (
prdx_1[valid_mask] * shwave[valid_mask] * potprd[valid_mask] *
h2ogef_1[valid_mask] * biof[valid_mask])
return tgprod_pot_prod
# temporary intermediate rasters for calculating total potential production
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
# site-level temporary calculated values
for val in ['sum_aglivc', 'sum_stdedc', 'ctemp', 'shwave', 'pevap']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
# PFT-level temporary calculated values
for pft_i in pft_id_set:
for val in [
'aglivc_weighted', 'stdedc_weighted', 'potprd', 'biof']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
# temporary parameter rasters for calculating total potential production
param_val_dict = {}
# site-level parameters
for val in [
'pmxbio', 'pmxtmp', 'pmntmp', 'fwloss_4', 'pprpts_1',
'pprpts_2', 'pprpts_3']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# PFT-level parameters
for val in [
'ppdf_1', 'ppdf_2', 'ppdf_3', 'ppdf_4', 'biok5', 'prdx_1']:
for pft_i in do_PFT:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
maxtmp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['max_temp_{}'.format(current_month)])['nodata'][0]
mintmp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['min_temp_{}'.format(current_month)])['nodata'][0]
precip_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['precip_{}'.format(month_index)])['nodata'][0]
# calculate intermediate quantities that do not differ between PFTs:
# sum of aglivc (standing live biomass) and stdedc (standing dead biomass)
# across PFTs, weighted by % cover of each PFT
for sv in ['aglivc', 'stdedc']:
weighted_sum_path = temp_val_dict['sum_{}'.format(sv)]
weighted_state_variable_sum(
sv, prev_sv_reg, aligned_inputs, pft_id_set, weighted_sum_path)
# ctemp, soil temperature relative to impacts on growth
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_aglivc'],
param_val_dict['pmxbio'],
aligned_inputs['max_temp_{}'.format(current_month)],
param_val_dict['pmxtmp'],
aligned_inputs['min_temp_{}'.format(current_month)],
param_val_dict['pmntmp']]],
calc_ctemp, temp_val_dict['ctemp'], gdal.GDT_Float32, _IC_NODATA)
# shwave, shortwave radiation outside the atmosphere
_shortwave_radiation(
aligned_inputs['site_index'], current_month, temp_val_dict['shwave'])
    # pevap, reference evapotranspiration modified by fwloss parameter
_reference_evapotranspiration(
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)],
temp_val_dict['shwave'],
param_val_dict['fwloss_4'],
temp_val_dict['pevap'])
# calculate quantities that differ between PFTs
for pft_i in do_PFT:
# potprd, the limiting effect of temperature
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aligned_inputs['min_temp_{}'.format(current_month)],
aligned_inputs['max_temp_{}'.format(current_month)],
temp_val_dict['ctemp'],
param_val_dict['ppdf_1_{}'.format(pft_i)],
param_val_dict['ppdf_2_{}'.format(pft_i)],
param_val_dict['ppdf_3_{}'.format(pft_i)],
param_val_dict['ppdf_4_{}'.format(pft_i)]]],
calc_potprd, temp_val_dict['potprd_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# h2ogef_1, the limiting effect of soil water availability
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pevap'],
prev_sv_reg['avh2o_1_{}_path'.format(pft_i)],
aligned_inputs['precip_{}'.format(month_index)],
pp_reg['wc_path'],
param_val_dict['pprpts_1'],
param_val_dict['pprpts_2'],
param_val_dict['pprpts_3']]],
calc_h2ogef_1, month_reg['h2ogef_1_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# biof, the limiting effect of obstruction
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_stdedc'],
temp_val_dict['sum_aglivc'],
prev_sv_reg['strucc_1_path'],
param_val_dict['pmxbio'],
param_val_dict['biok5_{}'.format(pft_i)]]],
calc_biof, temp_val_dict['biof_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# total potential production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['prdx_1_{}'.format(pft_i)],
temp_val_dict['shwave'],
temp_val_dict['potprd_{}'.format(pft_i)],
month_reg['h2ogef_1_{}'.format(pft_i)],
temp_val_dict['biof_{}'.format(pft_i)]]],
calc_tgprod_pot_prod,
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _calc_favail_P(sv_reg, param_val_dict):
"""Calculate the fraction of P in surface layer available to plants.
This must be performed after the sum of mineral N in the surface layer
is calculated because the fraction of labile P available to plants is
impacted by the amount of mineral N in the surface layer.
Parameters:
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month, including minerl_1_1, mineral N in the
surface layer
param_val_dict (dict): map of key, path pairs giving paths to
site-level parameters, including favail_4, favail_5, favail_6,
and favail_2
Side effects:
modifies or creates the raster indicated by
`param_val_dict['favail_2']`
Returns:
None
"""
def favail_P_op(minerl_1_1, favail_4, favail_5, favail_6):
"""Calculate the fraction of P in surface layer available to plants.
The fraction of labile P available to plants depends on mineral N in
the surface layer and site parameters favail_4, favail_5, favail_6.
Line 395 Simsom.f
Parameters:
minerl_1_1 (numpy.ndarray): state variable, mineral N in the
surface layer
favail_4 (numpy.ndarray): parameter, minimum fraction of P
available
favail_5 (numpy.ndarray): parameter, maximum fraction of P
available
favail_6 (numpy.ndarray): parameter, mineral N in surface layer
required to attain maximum fraction of P available
Returns:
favail_P, fraction of mineral P available to plants
"""
valid_mask = (
(~numpy.isclose(minerl_1_1, _SV_NODATA)) &
(favail_4 != _IC_NODATA) &
(favail_5 != _IC_NODATA) &
(favail_6 != _IC_NODATA))
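        # interpolate linearly between favail_4 and favail_5 as mineral N in
        # the surface layer approaches favail_6, then restrict the result to
        # the range [favail_4, favail_5]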
interim = numpy.empty(minerl_1_1.shape, dtype=numpy.float32)
interim[:] = _IC_NODATA
interim[valid_mask] = (
favail_4[valid_mask] + minerl_1_1[valid_mask] *
(favail_5[valid_mask] - favail_4[valid_mask]) /
favail_6[valid_mask])
favail_P = numpy.empty(minerl_1_1.shape, dtype=numpy.float32)
favail_P[:] = _IC_NODATA
favail_P[valid_mask] = numpy.maximum(
favail_4[valid_mask], numpy.minimum(
interim[valid_mask], favail_5[valid_mask]))
return favail_P
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['minerl_1_1_path'],
param_val_dict['favail_4'],
param_val_dict['favail_5'],
param_val_dict['favail_6']]],
favail_P_op, param_val_dict['favail_2'],
gdal.GDT_Float32, _IC_NODATA)
def _calc_avail_mineral_nutrient(pft_param_dict, sv_reg, iel, target_path):
"""Calculate one mineral nutrient available to one plant functional type.
The mineral nutrient available to a plant functional type is calculated
from the mineral nutrient content of soil layers accessible by that
plant function type.
Parameters:
pft_param_dict (dict): map of key, value pairs giving the values of
parameters for this plant functional type
(i.e., veg_trait_table[pft_i] for this pft_i)
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
iel (int): integer index for current nutrient (1=N, 2=P)
target_path (string): path to raster to contain available mineral
nutrient for this plant functional type and nutrient
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
nlay = int(pft_param_dict['nlaypg'])
mineral_raster_list = [
sv_reg['minerl_{}_{}_path'.format(lyr, iel)] for lyr in range(
1, nlay + 1)]
raster_list_sum(
mineral_raster_list, _SV_NODATA, target_path, _TARGET_NODATA,
nodata_remove=True)
def _calc_available_nutrient(
pft_i, iel, pft_param_dict, sv_reg, site_param_table, site_index_path,
availm_path, favail_path, tgprod_path, eavail_path):
"""Calculate nutrient available to a plant functional type.
The nutrient available is the sum of mineral nutrient (N or P) in soil
layers accessible by the roots of the plant functional type, modified
by the fraction of nutrient available to plants and the current root
biomass.
Parameters:
pft_i (int): plant functional type index
iel (int): nutrient index (iel=1 indicates N, iel=2 indicates P)
pft_param_dict (dict): map of key, value pairs giving the values of
parameters for this plant functional type
(i.e., veg_trait_table[pft_i] for this pft_i)
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
        site_param_table (dict): map of site spatial index to dictionaries
            that contain site-level parameters
        site_index_path (string): path to site spatial index raster
        availm_path (string): path to raster containing available mineral
            nutrient for the given plant functional type and nutrient
favail_path (string): path to raster containing the appropriate value
of the parameter favail. For nitrogen, this parameter is supplied
directly as user input, but for phosphorus, it must be calculated
from other parameters.
tgprod_path (string): path to raster containing total potential
production (g biomass)
eavail_path (string): path to location to store the result, nutrient
available to the plant functional type
Side effects:
modifies or creates the raster indicated by `eavail_path`
Returns:
None
"""
def calc_eavail(rictrl, bglivc, riint, availm, favail, crpstg):
"""Calculate available nutrient.
Parameters:
rictrl (numpy.ndarray): parameter, scaling factor used to
calculate the impact of root biomass on nutrient availability
bglivc (numpy.ndarray): state variable, carbon in belowground
live biomass
riint (numpy.ndarray): parameter, intercept used to calculate the
impact of root biomass on nutrient availability
availm (numpy.ndarray): derived, the sum of mineral nutrient in
soil layers accessible by this plant functional type
favail (numpy.ndarray): parameter, fraction of the nutrient
available each month to plants
crpstg (numpy.ndarray): state variable, nutrient in
retranslocation storage pool for the plant functional type
Returns:
eavail, the nutrient available to the plant functional type
"""
valid_mask = (
(rictrl != _IC_NODATA) &
(~numpy.isclose(bglivc, _SV_NODATA)) &
(riint != _IC_NODATA) &
(availm != _TARGET_NODATA) &
(favail != _IC_NODATA) &
(~numpy.isclose(crpstg, _SV_NODATA)))
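        # rimpct: effect of root biomass on nutrient availability, approaching
        # 1 as rictrl * root biomass (carbon * 2.5) exceeds 33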
rimpct = numpy.empty(rictrl.shape, dtype=numpy.float32)
rimpct[:] = _TARGET_NODATA
rimpct[valid_mask] = numpy.where(
((rictrl[valid_mask] * bglivc[valid_mask] * 2.5) > 33.),
1., 1. - riint[valid_mask] * numpy.exp(
-rictrl[valid_mask] * bglivc[valid_mask] * 2.5))
eavail = numpy.empty(rictrl.shape, dtype=numpy.float32)
eavail[:] = _TARGET_NODATA
eavail[valid_mask] = (
(availm[valid_mask] * favail[valid_mask] * rimpct[valid_mask]) +
crpstg[valid_mask])
return eavail
def add_symbiotic_fixed_N(eavail_prior, snfxmx, tgprod):
"""Add nitrogen fixed by the plant to nutrient available.
Some nitrogen may be fixed by the plant, and this must be added
to available mineral nitrogen. Nitrogen fixed by the plant is
calculated from total potential production and the maximum
rate of N fixation.
Parameters:
eavail_prior (numpy.ndarray): derived, mineral nitrogen available
to the plant functional type, calculated with calc_eavail()
snfxmx (numpy.ndarray): parameter, maximum rate of symbiotic
nitrogen fixation
tgprod (numpy.ndarray): derived, total above- and belowground
potential production (g biomass)
Returns:
eavail, total N available including N fixed by the plant
"""
valid_mask = (
(eavail_prior != _TARGET_NODATA) &
(snfxmx != _IC_NODATA) &
(tgprod != _TARGET_NODATA))
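        # maximum symbiotic N fixation scales with potential production
        # (the division by 2.5 converts g biomass to g carbon)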
maxNfix = numpy.empty(eavail_prior.shape, dtype=numpy.float32)
maxNfix[:] = _TARGET_NODATA
maxNfix[valid_mask] = snfxmx[valid_mask] * (tgprod[valid_mask] / 2.5)
eavail = numpy.empty(eavail_prior.shape, dtype=numpy.float32)
eavail[:] = _TARGET_NODATA
eavail[valid_mask] = eavail_prior[valid_mask] + maxNfix[valid_mask]
return eavail
# temporary intermediate rasters for calculating available nutrient
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for val in ['rictrl', 'riint']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for val in ['snfxmx_1']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
fill_val = pft_param_dict[val]
pygeoprocessing.new_raster_from_base(
site_index_path, target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['rictrl'],
sv_reg['bglivc_{}_path'.format(pft_i)],
param_val_dict['riint'],
availm_path, favail_path,
sv_reg['crpstg_{}_{}_path'.format(iel, pft_i)]]],
calc_eavail, eavail_path,
gdal.GDT_Float32, _TARGET_NODATA)
if iel == 1:
eavail_prior_path = os.path.join(temp_dir, 'eavail_prior.tif')
shutil.copyfile(eavail_path, eavail_prior_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
eavail_prior_path,
param_val_dict['snfxmx_1'],
tgprod_path]],
add_symbiotic_fixed_N, eavail_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _calc_nutrient_demand(
biomass_production_path, fraction_allocated_to_roots_path,
cercrp_min_above_path, cercrp_min_below_path, demand_path):
"""Calculate the demand of one nutrient by a plant functional type.
Demand is calculated from total biomass production, the fraction of biomass
production allocated to roots, and the minimum carbon/nutrient ratios of
above- and belowground live biomass. Lines 88-92 CropDynC.f and line
65, Nutrlm.f
Parameters:
biomass_production_path (string): path to raster giving total
biomass production
        fraction_allocated_to_roots_path (string): path to raster giving
            the fraction of total biomass production allocated to roots
        cercrp_min_above_path (string): path to raster giving the minimum
            ratio of carbon to nutrient in aboveground live biomass
        cercrp_min_below_path (string): path to raster giving the minimum
            ratio of carbon to nutrient in belowground live biomass
        demand_path (string): path to raster to contain calculated demand for
            the nutrient
Side effects:
modifies or creates the raster indicated by `demand_path`
Returns:
None
"""
def nutrient_demand_op(
biomass_production, root_fraction, cercrp_min_above,
cercrp_min_below):
"""Calculate nutrient demand.
Parameters:
biomass_production (numpy.ndarray): derived, total biomass
production
root_fraction (numpy.ndarray): derived, fraction of biomass
allocated to roots
cercrp_min_above (numpy.ndarray): derived, minimum carbon to
nutrient ratio of new aboveground live material
cercrp_min_below (numpy.ndarray): derived, minimum carbon to
nutrient ratio of new belowground live material
Returns:
demand_e, nutrient demand
"""
valid_mask = (
(biomass_production != _TARGET_NODATA) &
(root_fraction != _TARGET_NODATA) &
(cercrp_min_above != _TARGET_NODATA) &
(cercrp_min_above > 0) &
(cercrp_min_below > 0) &
(cercrp_min_below != _TARGET_NODATA))
demand_above = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_above[:] = _TARGET_NODATA
demand_above[valid_mask] = (
((biomass_production[valid_mask] *
(1. - root_fraction[valid_mask])) / 2.5) *
(1. / cercrp_min_above[valid_mask]))
demand_below = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_below[:] = _TARGET_NODATA
demand_below[valid_mask] = (
((biomass_production[valid_mask] *
(root_fraction[valid_mask])) / 2.5) *
(1. / cercrp_min_below[valid_mask]))
demand_e = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_e[:] = _TARGET_NODATA
demand_e[valid_mask] = (
demand_above[valid_mask] + demand_below[valid_mask])
return demand_e
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
biomass_production_path, fraction_allocated_to_roots_path,
cercrp_min_above_path, cercrp_min_below_path]],
nutrient_demand_op, demand_path,
gdal.GDT_Float32, _TARGET_NODATA)
def calc_provisional_fracrc(
annual_precip, frtcindx, bgppa, bgppb, agppa, agppb,
cfrtcw_1, cfrtcw_2, cfrtcn_1, cfrtcn_2):
"""Calculate provisional fraction of carbon allocated to roots.
A temporary provisional fraction of carbon allocated to roots must be
calculated prior to calculating plant demand for N and P. The value
of this provisional fraction depends on whether the plant functional
type is modeled as a perennial plant or with the "Great Plains"
equation of Parton et al. 1987, "Analysis of factors controlling soil
organic matter levels in Great Plains grasslands", Soil Science
Society of America Journal. Lines 36-47 cropDynC.f
Parameters:
annual_precip (numpy.ndarray): derived, sum of monthly
precipitation over twelve months including the current month
frtcindx (numpy.ndarray): parameter, flag indicating whether
root:shoot allocation follows the Great Plains equation
(frtcindx=0) or as a perennial plant (frtcindx=1)
bgppa (numpy.ndarray): parameter, intercept in regression
estimating belowground production from annual precipitation
if frtcindx=0
bgppb (numpy.ndarray): parameter, slope in regression estimating
belowground production from annual precipitation if
frtcindx=0
agppa (numpy.ndarray): parameter, intercept in regression
estimating aboveground production from annual precipitation
if frtcindx=0
agppb (numpy.ndarray): parameter, slope in regression estimating
aboveground production from annual precipitation if
frtcindx=0
cfrtcw_1 (numpy.ndarray): parameter, maximum fraction of carbon
allocated to roots under maximum water stress if frtcindx=1
cfrtcw_2 (numpy.ndarray): parameter, minimum fraction of carbon
allocated to roots without water stress if frtcindx=1
cfrtcn_1 (numpy.ndarray): parameter, maximum fraction of carbon
allocated to roots under maximum nutrient stress if frtcindx=1
cfrtcn_2 (numpy.ndarray): parameter, minimum fraction of carbon
allocated to roots under no nutrient stress if frtcindx=1
Returns:
fracrc_p, provisional fraction of carbon allocated to roots
"""
valid_mask = (
(annual_precip != _TARGET_NODATA) &
(frtcindx != _IC_NODATA) &
(bgppa != _IC_NODATA))
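    # "Great Plains" case: estimate the root:shoot ratio from annual
    # precipitation using separate linear regressions for belowground and
    # aboveground production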
rtsh = numpy.empty(annual_precip.shape, dtype=numpy.float32)
rtsh[:] = _TARGET_NODATA
rtsh[valid_mask] = (
(bgppa[valid_mask] +
annual_precip[valid_mask] * bgppb[valid_mask]) /
(agppa[valid_mask] + annual_precip[valid_mask] *
agppb[valid_mask]))
fracrc_p = numpy.empty(annual_precip.shape, dtype=numpy.float32)
fracrc_p[:] = _TARGET_NODATA
fracrc_p[valid_mask] = numpy.where(
frtcindx[valid_mask] == 0,
(1.0 / (1.0 / rtsh[valid_mask] + 1.0)),
((cfrtcw_1[valid_mask] + cfrtcw_2[valid_mask] +
cfrtcn_1[valid_mask] + cfrtcn_2[valid_mask]) / 4.0))
return fracrc_p
def calc_ce_ratios(
pramn_1_path, pramn_2_path, aglivc_path, biomax_path,
pramx_1_path, pramx_2_path, prbmn_1_path, prbmn_2_path,
prbmx_1_path, prbmx_2_path, annual_precip_path, pft_i, iel,
month_reg):
"""Calculate minimum and maximum carbon to nutrient ratios.
Minimum and maximum C/E ratios are used to calculate demand for a
nutrient by a plant functional type. This function calculates the
ratios for above- and belowground plant portions, for one plant
functional type and one nutrient. Fltce.f
Parameters:
pramn_1_path (string): path to raster containing the parameter
pramn_<iel>_1, the minimum aboveground ratio with zero biomass
pramn_2_path (string): path to raster containing the parameter
pramn_<iel>_2, the minimum aboveground ratio with biomass greater
than or equal to biomax
aglivc_path (string): path to raster containing carbon in
aboveground live biomass
biomax_path (string): path to raster containing the parameter
biomax, the biomass above which the ratio equals pramn_2
or pramx_2
pramx_1_path (string): path to raster containing the parameter
pramx_<iel>_1, the maximum aboveground ratio with zero biomass
pramx_2_path (string): path to raster containing the parameter
pramx_<iel>_2, the maximum aboveground ratio with biomass greater
than or equal to biomax
prbmn_1_path (string): path to raster containing the parameter
prbmn_<iel>_1, intercept of regression to predict minimum
belowground ratio from annual precipitation
prbmn_2_path (string): path to raster containing the parameter
prbmn_<iel>_2, slope of regression to predict minimum belowground
ratio from annual precipitation
prbmx_1_path (string): path to raster containing the parameter
prbmx_<iel>_1, intercept of regression to predict maximum
belowground ratio from annual precipitation
prbmx_2_path (string): path to raster containing the parameter
prbmx_<iel>_2, slope of regression to predict maximum belowground
ratio from annual precipitation
annual_precip_path (string): path to annual precipitation raster
pft_i (int): plant functional type index
iel (int): nutrient index (iel=1 indicates N, iel=2 indicates P)
month_reg (dict): map of key, path pairs giving paths to
intermediate calculated values that are shared between
submodels
Side effects:
creates the rasters indicated by
`month_reg['cercrp_min_above_<iel>_<pft_i>']`,
`month_reg['cercrp_max_above_<iel>_<pft_i>']`,
`month_reg['cercrp_min_below_<iel>_<pft_i>']`,
`month_reg['cercrp_max_below_<iel>_<pft_i>']`,
Returns:
None
"""
def calc_above_ratio(pra_1, pra_2, aglivc, biomax):
"""Calculate carbon to nutrient ratio for aboveground material.
Parameters:
pra_1 (numpy.ndarray): parameter, minimum or maximum ratio
with zero biomass
pra_2 (numpy.ndarray): parameter, minimum or maximum ratio
with biomass greater than or equal to biomax
aglivc (numpy.ndarray): state variable, carbon in aboveground
live material
biomax (numpy:ndarray): parameter, biomass above which the
ratio equals pra_2
Returns:
cercrp_above, carbon to nutrient ratio for aboveground
material
"""
valid_mask = (
(pra_1 != _IC_NODATA) &
(pra_2 != _IC_NODATA) &
(~numpy.isclose(aglivc, _SV_NODATA)) &
(biomax != _IC_NODATA))
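        # the ratio increases linearly with aboveground live biomass and
        # saturates at pra_2 once biomass (aglivc * 2.5) reaches biomax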
cercrp_above = numpy.empty(pra_1.shape, dtype=numpy.float32)
cercrp_above[:] = _TARGET_NODATA
cercrp_above[valid_mask] = numpy.minimum(
(pra_1[valid_mask] + (pra_2[valid_mask] - pra_1[valid_mask]) *
2.5 * aglivc[valid_mask] / biomax[valid_mask]),
pra_2[valid_mask])
return cercrp_above
def calc_below_ratio(prb_1, prb_2, annual_precip):
"""Calculate carbon to nutrient ratio for belowground material.
Parameters:
prb_1 (numpy.ndarray): parameter, intercept of regression
to predict ratio from annual precipitation
prb_2 (numpy.ndarray): parameter, slope of regression to
predict ratio from annual precipitation
annual_precip (numpy.ndarray): derived, precipitation in twelve
months including the current month
Returns:
cercrp_below, carbon to nutrient ratio for belowground
material
"""
valid_mask = (
(prb_1 != _IC_NODATA) &
(prb_2 != _IC_NODATA) &
(annual_precip != _TARGET_NODATA))
cercrp_below = numpy.empty(prb_1.shape, dtype=numpy.float32)
cercrp_below[:] = _TARGET_NODATA
cercrp_below[valid_mask] = (
prb_1[valid_mask] +
(prb_2[valid_mask] * annual_precip[valid_mask]))
return cercrp_below
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pramn_1_path, pramn_2_path, aglivc_path, biomax_path]],
calc_above_ratio,
month_reg['cercrp_min_above_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pramx_1_path, pramx_2_path, aglivc_path, biomax_path]],
calc_above_ratio,
month_reg['cercrp_max_above_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prbmn_1_path, prbmn_2_path, annual_precip_path]],
calc_below_ratio,
month_reg['cercrp_min_below_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prbmx_1_path, prbmx_2_path, annual_precip_path]],
calc_below_ratio,
month_reg['cercrp_max_below_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
def calc_revised_fracrc(
frtcindx_path, fracrc_p_path, totale_1_path, totale_2_path,
demand_1_path, demand_2_path, h2ogef_1_path, cfrtcw_1_path,
cfrtcw_2_path, cfrtcn_1_path, cfrtcn_2_path, fracrc_r_path):
"""
Calculate revised fraction of carbon allocated to roots.
The revised fraction of carbon allocated to roots includes the
impacts of water and nutrient limitation. The method of the
revised calculation depends on whether the plant functional
type is modeled as a perennial plant or with the "Great Plains"
equation of Parton et al. 1987, "Analysis of factors controlling soil
organic matter levels in Great Plains grasslands", Soil Science
Society of America Journal. Lines 96-104, cropDynC.f, froota.f
Parameters:
frtcindx_path (string): path to raster containing the parameter
frtcindx
fracrc_p_path (string): path to raster containing provisional
fraction of carbon allocated to roots
totale_1_path (string): path to raster containing total available
nitrogen
totale_2_path (string): path to raster containing total available
phosphorus
demand_1_path (string): path to raster containing nitrogen demand
demand_2_path (string): path to raster containing phosphorus demand
h2ogef_1_path (string): path to raster containing the limiting
effect of water availability on growth
cfrtcw_1_path (string): path to raster containing the parameter
cfrtcw_1
cfrtcw_2_path (string): path to raster containing the parameter
cfrtcw_2
cfrtcn_1_path (string): path to raster containing the parameter
cfrtcn_1
cfrtcn_2_path (string): path to raster containing the parameter
cfrtcn_2
fracrc_r_path (string): path to raster that should contain the
result, revised fraction of carbon allocated to roots
Side effects:
creates the raster indicated by `fracrc_r_path`
Returns:
None
"""
def calc_a2drat(totale, demand):
"""Calculate the ratio of available nutrient to nutrient demand.
The ratio of nutrient available to demand for the nutrient is
restricted to be between 0 and 1.
Parameters:
totale (numpy.ndarray): derived, nutrient available
demand (numpy.ndarray): derived, demand for the nutrient
Returns:
a2drat, the ratio of available nutrient to demand, restricted
to be between 0 and 1
"""
valid_mask = (
(totale != _TARGET_NODATA) &
(demand != _TARGET_NODATA))
a2drat = numpy.empty(totale.shape, dtype=numpy.float32)
a2drat[:] = _TARGET_NODATA
demand_mask = ((demand > 0) & valid_mask)
a2drat[valid_mask] = 1.
a2drat[demand_mask] = numpy.clip(
totale[demand_mask] / demand[demand_mask], 0., 1.)
return a2drat
def calc_perennial_fracrc(
h2ogef, cfrtcw_1, cfrtcw_2, a2drat_1, a2drat_2, cfrtcn_1,
cfrtcn_2):
"""Calculate fraction C allocated to roots for a perennial plant.
The fraction of carbon allocated to roots is determined by
water availability, described by h2ogef, and nutrient availability,
described by a2drat_1 for nitrogen and a2drat_2 for phosphorus.
Lines 114-125 froota.f
Parameters:
h2ogef (numpy.ndarray): derived, the limiting factor of water
availability on growth
cfrtcw_1 (numpy.ndarray): parameter, the maximum fraction of
carbon allocated to roots with maximum water stress
cfrtcw_2 (numpy.ndarray): parameter, the minimum fraction of
carbon allocated to roots with no water stress
a2drat_1 (numpy.ndarray): derived, the ratio of available
nitrogen to nitrogen demand, restricted to be between 0
and 1
a2drat_2 (numpy.ndarray): derived, the ratio of available
phosphorus to phosphorus demand, restricted to be between
0 and 1
cfrtcn_1 (numpy.ndarray): parameter, maximum fraction of
carbon allocated to roots with maximum nutrient stress
cfrtcn_2 (numpy.ndarray): parameter, minimum fraction of
carbon allocated to roots with no nutrient stress
Returns:
fracrc_perennial, revised fraction of C allocated to roots for
a perennial plant
"""
valid_mask = (
(h2ogef != _TARGET_NODATA) &
(cfrtcw_1 != _IC_NODATA) &
(cfrtcw_2 != _IC_NODATA) &
(a2drat_1 != _TARGET_NODATA) &
(a2drat_2 != _TARGET_NODATA) &
(cfrtcn_1 != _IC_NODATA) &
(cfrtcn_2 != _IC_NODATA))
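        # water effect: interpolate between cfrtcw_1 (maximum water stress)
        # and cfrtcw_2 (no water stress) according to h2ogef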
h2oeff = numpy.empty(h2ogef.shape, dtype=numpy.float32)
h2oeff[:] = _TARGET_NODATA
h2oeff[valid_mask] = (
(cfrtcw_2[valid_mask] - cfrtcw_1[valid_mask]) *
(h2ogef[valid_mask] - 1.) + cfrtcw_2[valid_mask])
ntreff_1 = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff_1[:] = _TARGET_NODATA
ntreff_1[valid_mask] = (
(cfrtcn_2[valid_mask] - cfrtcn_1[valid_mask]) *
(a2drat_1[valid_mask] - 1.0) + cfrtcn_2[valid_mask])
ntreff_2 = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff_2[:] = _TARGET_NODATA
        ntreff_2[valid_mask] = (
(cfrtcn_2[valid_mask] - cfrtcn_1[valid_mask]) *
(a2drat_2[valid_mask] - 1.0) + cfrtcn_2[valid_mask])
ntreff = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff[:] = _TARGET_NODATA
ntreff[valid_mask] = numpy.maximum(
ntreff_1[valid_mask], ntreff_2[valid_mask])
fracrc_perennial = numpy.empty(
h2ogef.shape, dtype=numpy.float32)
fracrc_perennial[:] = _TARGET_NODATA
fracrc_perennial[valid_mask] = numpy.minimum(
numpy.maximum(h2oeff[valid_mask], ntreff[valid_mask]), 0.99)
return fracrc_perennial
def revised_fracrc_op(frtcindx, fracrc_p, fracrc_perennial):
"""Calculate revised fraction of carbon allocated to roots.
The revised fraction of carbon allocated to roots is calculated
according to the parameter frtcindx. If frtcindx=0 (use the "Great
Plains equation"), the revised fraction is equal to the provisional
fraction. If frtcindx=1 (a perennial plant), the revised fraction
is calculated from water and nutrient stress.
Parameters:
frtcindx (numpy.ndarray): parameter, indicates whether revised
fraction of carbon allocated to roots should follow the
"Great Plains equation" or the algorithm for a perennial
plant
fracrc_p (numpy.ndarray): derived, provisional fraction of
carbon allocated to roots
fracrc_perennial (numpy.ndarray): derived, fraction of
carbon allocated to roots for a perennial plant
Returns:
fracrc_r, revised fraction of carbon allocated to roots
"""
valid_mask = (
(frtcindx != _IC_NODATA) &
(fracrc_p != _TARGET_NODATA) &
(fracrc_perennial != _TARGET_NODATA))
fracrc_r = numpy.empty(frtcindx.shape, dtype=numpy.float32)
fracrc_r[:] = _TARGET_NODATA
fracrc_r[valid_mask] = numpy.where(
frtcindx[valid_mask] == 0, fracrc_p[valid_mask],
fracrc_perennial[valid_mask])
return fracrc_r
# temporary intermediate rasters for calculating revised fracrc
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in ['a2drat_1', 'a2drat_2', 'fracrc_perennial']:
temp_val_dict[val] = os.path.join(
temp_dir, '{}.tif'.format(val))
pygeoprocessing.raster_calculator(
[(path, 1) for path in [totale_1_path, demand_1_path]],
calc_a2drat, temp_val_dict['a2drat_1'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [totale_2_path, demand_2_path]],
calc_a2drat, temp_val_dict['a2drat_2'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
h2ogef_1_path, cfrtcw_1_path, cfrtcw_2_path,
temp_val_dict['a2drat_1'], temp_val_dict['a2drat_2'],
cfrtcn_1_path, cfrtcn_2_path]],
calc_perennial_fracrc, temp_val_dict['fracrc_perennial'],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
frtcindx_path, fracrc_p_path,
temp_val_dict['fracrc_perennial']]],
revised_fracrc_op, fracrc_r_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def grazing_effect_on_aboveground_production(tgprod, fracrc, flgrem, grzeff):
"""Adjust aboveground production with the impact of grazing.
Removal of biomass by herbivores directly impacts potential
aboveground production according to the amount of biomass removed
and the parameter grzeff, which acts as a switch to determine the
effect. If grzeff=0, 3, or 4, aboveground production is not
changed. If grzeff=1 or 6, production decreases linearly with
biomass removed; if grzeff=2 or 5, biomass removed has a quadratic
impact on production. Grazrst.f
Parameters:
tgprod (numpy.ndarray): derived, total potential biomass
production restricted by water and nutrient availability
fracrc (numpy.ndarray): derived, fraction of carbon allocated
to roots according to water and nutrient availability
flgrem (numpy.ndarray): derived, fraction of live biomass
removed by grazing in previous monthly step
grzeff (numpy.ndarray): parameter, the effect of defoliation on
production and root:shoot ratio
Returns:
agprod, aboveground production impacted by grazing
"""
valid_mask = (
(tgprod != _TARGET_NODATA) &
(fracrc != _TARGET_NODATA) &
(flgrem != _TARGET_NODATA) &
(grzeff != _IC_NODATA))
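    # compute the linear and quadratic responses for all valid pixels, then
    # choose among no effect, linear, and quadratic according to grzeff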
agprod_prior = numpy.empty(tgprod.shape, dtype=numpy.float32)
agprod_prior[:] = _TARGET_NODATA
agprod_prior[valid_mask] = (
tgprod[valid_mask] * (1. - fracrc[valid_mask]))
linear_effect = numpy.empty(tgprod.shape, dtype=numpy.float32)
linear_effect[:] = _TARGET_NODATA
linear_effect[valid_mask] = numpy.maximum(
(1. - (2.21*flgrem[valid_mask])) * agprod_prior[valid_mask],
0.02)
quadratic_effect = numpy.empty(tgprod.shape, dtype=numpy.float32)
quadratic_effect[:] = _TARGET_NODATA
quadratic_effect[valid_mask] = (
(1. + 2.6*flgrem[valid_mask] -
(5.83*(numpy.power(flgrem[valid_mask], 2)))) *
agprod_prior[valid_mask])
quadratic_effect[valid_mask] = numpy.maximum(
quadratic_effect[valid_mask], 0.02)
no_effect_mask = (valid_mask & numpy.isin(grzeff, [0, 3, 4]))
linear_mask = (valid_mask & numpy.isin(grzeff, [1, 6]))
quadratic_mask = (valid_mask & numpy.isin(grzeff, [2, 5]))
agprod = numpy.empty(tgprod.shape, dtype=numpy.float32)
agprod[:] = _TARGET_NODATA
agprod[no_effect_mask] = agprod_prior[no_effect_mask]
agprod[linear_mask] = linear_effect[linear_mask]
agprod[quadratic_mask] = quadratic_effect[quadratic_mask]
return agprod
def grazing_effect_on_root_shoot(fracrc, flgrem, grzeff, gremb):
"""Adjust root:shoot ratio according to the impact of grazing.
Removal of biomass by herbivores directly impacts the root:shoot
ratio of production according to the amount of biomass removed and
the parameter grzeff, which acts as a switch to determine the
effect. If grzeff=0 or 1, the root:shoot ratio is not changed.
If grzeff=2 or 3, biomass removed has a quadratic impact on the
root:shoot ratio. If grzeff=4, 5, or 6, biomass removed has a
linear effect on the root:shoot ratio. The parameter gremb
multiplies the linear impact of grazing when grzeff=4, 5 or 6.
Grzrst.f
Parameters:
fracrc (numpy.ndarray): derived, fraction of carbon allocated
to roots according to water and nutrient availability
flgrem (numpy.ndarray): derived, fraction of live biomass
removed by grazing in previous monthly step
grzeff (numpy.ndarray): parameter, the effect of defoliation on
production and root:shoot ratio
        gremb (numpy.ndarray): parameter, grazing effect multiplier
Returns:
rtsh, root:shoot ratio impacted by grazing
"""
valid_mask = (
(fracrc != _TARGET_NODATA) &
(flgrem != _TARGET_NODATA) &
(grzeff != _IC_NODATA) &
(gremb != _IC_NODATA))
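    # compute quadratic and linear adjustments to the pre-grazing root:shoot
    # ratio, then choose the response according to grzeff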
rtsh_prior = numpy.empty(fracrc.shape, dtype=numpy.float32)
rtsh_prior[:] = _TARGET_NODATA
rtsh_prior[valid_mask] = (
fracrc[valid_mask] / (1. - fracrc[valid_mask]))
quadratic_effect = numpy.empty(fracrc.shape, dtype=numpy.float32)
quadratic_effect[:] = _TARGET_NODATA
quadratic_effect[valid_mask] = numpy.maximum(
rtsh_prior[valid_mask] + 3.05 * flgrem[valid_mask] -
11.78 * numpy.power(flgrem[valid_mask], 2),
0.01)
linear_effect = numpy.empty(fracrc.shape, dtype=numpy.float32)
linear_effect[:] = _TARGET_NODATA
linear_effect[valid_mask] = numpy.maximum(
1. - (flgrem[valid_mask] * gremb[valid_mask]),
0.01)
no_effect_mask = (valid_mask & numpy.isin(grzeff, [0, 1]))
quadratic_mask = (valid_mask & numpy.isin(grzeff, [2, 3]))
linear_mask = (valid_mask & numpy.isin(grzeff, [4, 5, 6]))
rtsh = numpy.empty(fracrc.shape, dtype=numpy.float32)
rtsh[:] = _TARGET_NODATA
rtsh[no_effect_mask] = rtsh_prior[no_effect_mask]
rtsh[quadratic_mask] = quadratic_effect[quadratic_mask]
rtsh[linear_mask] = linear_effect[linear_mask]
return rtsh
def calc_tgprod_final(rtsh, agprod):
"""Calculate final total potential production.
Final total potential production is calculated from aboveground
production impacted by grazing and the final root:shoot ratio
impacted by grazing.
Parameters:
rtsh (numpy.ndarray): derived, final root:shoot ratio impacted
by grazing
agprod (numpy.ndarray): derived, final aboveground potential
production impacted by grazing
Returns:
tgprod, final total potential production
"""
valid_mask = (
(rtsh != _TARGET_NODATA) &
(agprod != _TARGET_NODATA))
tgprod = numpy.empty(rtsh.shape, dtype=numpy.float32)
tgprod[:] = _TARGET_NODATA
tgprod[valid_mask] = (
agprod[valid_mask] + (rtsh[valid_mask] * agprod[valid_mask]))
return tgprod
def calc_final_tgprod_rtsh(
tgprod_pot_prod_path, fracrc_path, flgrem_path, grzeff_path,
gremb_path, tgprod_path, rtsh_path):
"""Calculate final potential production and root:shoot ratio.
Final potential production and root:shoot ratio include the impact of
grazing. First calculate final aboveground production including the
impact of grazing; then calculate rtsh, the final root:shoot ratio
including the impact of grazing; then calculate tgprod, final total
potential production, from final aboveground production and final
root:shoot ratio. Grazrst.f
Parameters:
tgprod_pot_prod_path (string): path to raster containing total
potential biomass production restricted by water and nutrient
availability, prior to effects of grazing
fracrc_path (string): path to raster containing the fraction of
carbon production allocated to roots according to restriction
by water and nutrient availability, prior to effects of
grazing
flgrem_path (string): path to raster containing the fraction of
live aboveground biomass removed by herbivores according to
diet selection in the previous step
grzeff_path (string): path to raster containing the parameter
            grzeff, the effect of defoliation on production and root:shoot
ratio
gremb_path (string): path to raster containing the parameter
gremb, the grazing effect multiplier
tgprod_path (string): path to raster containing final total
potential production (g biomass)
rtsh_path (string): path to raster containing final root:shoot
ratio of potential production
Side effects:
creates the raster indicated by tgprod_path
creates the raster indicated by rtsh_path
Returns:
None
"""
# temporary intermediate rasters for grazing effect
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
agprod_path = os.path.join(temp_dir, 'agprod.tif')
# grazing effect on aboveground production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tgprod_pot_prod_path, fracrc_path, flgrem_path,
grzeff_path]],
grazing_effect_on_aboveground_production,
agprod_path, gdal.GDT_Float32, _TARGET_NODATA)
# grazing effect on final root:shoot ratio
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
fracrc_path, flgrem_path, grzeff_path, gremb_path]],
grazing_effect_on_root_shoot, rtsh_path,
gdal.GDT_Float32, _TARGET_NODATA)
# final total potential production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [rtsh_path, agprod_path]],
calc_tgprod_final, tgprod_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg):
"""Calculate final potential production and root:shoot ratio.
Final potential biomass production and root:shoot ratio is calculated
according to nutrient availability and demand for the nutrient, and the
impact of defoliation by herbivores. CropDynC.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including the site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
current_month (int): month of the year, such that current_month=1
indicates January
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
year_reg (dict): map of key, path pairs giving paths to rasters that
are modified once per year, including annual precipitation
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
Side effects:
creates the raster indicated by
`month_reg['tgprod_<PFT>']`, total potential production (g biomass)
for each plant functional type (PFT)
creates the raster indicated by `month_reg['rtsh_<PFT>']` for each
plant functional type (PFT)
Returns:
None
"""
# if growth does not occur this month for all PFTs,
# skip the rest of the function
do_PFT = []
for pft_i in pft_id_set:
# growth occurs in growth months and when senescence not scheduled
do_growth = (
current_month != veg_trait_table[pft_i]['senescence_month'] and
str(current_month) in veg_trait_table[pft_i]['growth_months'])
if do_growth:
do_PFT.append(pft_i)
if not do_PFT:
return
# temporary intermediate rasters for root:shoot submodel
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for pft_i in do_PFT:
for val in ['fracrc_p', 'fracrc', 'availm']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
for iel in [1, 2]:
for val in ['eavail', 'demand']:
temp_val_dict[
'{}_{}_{}'.format(val, iel, pft_i)] = os.path.join(
temp_dir, '{}_{}_{}.tif'.format(val, iel, pft_i))
# temporary parameter rasters for root:shoot submodel
param_val_dict = {}
# site-level parameters
for val in [
'bgppa', 'bgppb', 'agppa', 'agppb', 'favail_1', 'favail_4',
'favail_5', 'favail_6']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# PFT-level parameters
for pft_i in do_PFT:
        for val in [
                'frtcindx', 'cfrtcw_1', 'cfrtcw_2', 'cfrtcn_1', 'cfrtcn_2',
                'biomax', 'grzeff', 'gremb']:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
for val in [
'pramn_1_1', 'pramn_1_2', 'pramx_1_1', 'pramx_1_2',
'prbmn_1_1', 'prbmn_1_2', 'prbmx_1_1', 'prbmx_1_2',
'pramn_2_1', 'pramn_2_2', 'pramx_2_1', 'pramx_2_2',
'prbmn_2_1', 'prbmn_2_2', 'prbmx_2_1', 'prbmx_2_2']:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict[
'{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path,
gdal.GDT_Float32, [_IC_NODATA], fill_value_list=[fill_val])
# the parameter favail_2 must be calculated from current mineral N in
# surface layer
param_val_dict['favail_2'] = os.path.join(temp_dir, 'favail_2.tif')
_calc_favail_P(prev_sv_reg, param_val_dict)
for pft_i in do_PFT:
# fracrc_p, provisional fraction of C allocated to roots
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
year_reg['annual_precip_path'],
param_val_dict['frtcindx_{}'.format(pft_i)],
param_val_dict['bgppa'],
param_val_dict['bgppb'],
param_val_dict['agppa'],
param_val_dict['agppb'],
param_val_dict['cfrtcw_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_2_{}'.format(pft_i)],
param_val_dict['cfrtcn_1_{}'.format(pft_i)],
param_val_dict['cfrtcn_2_{}'.format(pft_i)]]],
calc_provisional_fracrc,
temp_val_dict['fracrc_p_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
for iel in [1, 2]:
# persistent ratios used here and in plant growth submodel
calc_ce_ratios(
param_val_dict['pramn_{}_1_{}'.format(iel, pft_i)],
param_val_dict['pramn_{}_2_{}'.format(iel, pft_i)],
prev_sv_reg['aglivc_{}_path'.format(pft_i)],
param_val_dict['biomax_{}'.format(pft_i)],
param_val_dict['pramx_{}_1_{}'.format(iel, pft_i)],
param_val_dict['pramx_{}_2_{}'.format(iel, pft_i)],
param_val_dict['prbmn_{}_1_{}'.format(iel, pft_i)],
param_val_dict['prbmn_{}_2_{}'.format(iel, pft_i)],
param_val_dict['prbmx_{}_1_{}'.format(iel, pft_i)],
param_val_dict['prbmx_{}_2_{}'.format(iel, pft_i)],
year_reg['annual_precip_path'], pft_i, iel, month_reg)
# sum of mineral nutrient in accessible soil layers
_calc_avail_mineral_nutrient(
veg_trait_table[pft_i], prev_sv_reg, iel,
temp_val_dict['availm_{}'.format(pft_i)])
# eavail_iel, available nutrient
_calc_available_nutrient(
pft_i, iel, veg_trait_table[pft_i], prev_sv_reg,
site_param_table, aligned_inputs['site_index'],
temp_val_dict['availm_{}'.format(pft_i)],
param_val_dict['favail_{}'.format(iel)],
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['eavail_{}_{}'.format(iel, pft_i)])
# demand_iel, demand for the nutrient
_calc_nutrient_demand(
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['fracrc_p_{}'.format(pft_i)],
month_reg['cercrp_min_above_{}_{}'.format(iel, pft_i)],
month_reg['cercrp_min_below_{}_{}'.format(iel, pft_i)],
temp_val_dict['demand_{}_{}'.format(iel, pft_i)])
# revised fraction of carbon allocated to roots
calc_revised_fracrc(
param_val_dict['frtcindx_{}'.format(pft_i)],
temp_val_dict['fracrc_p_{}'.format(pft_i)],
temp_val_dict['eavail_1_{}'.format(pft_i)],
temp_val_dict['eavail_2_{}'.format(pft_i)],
temp_val_dict['demand_1_{}'.format(pft_i)],
temp_val_dict['demand_2_{}'.format(pft_i)],
month_reg['h2ogef_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_2_{}'.format(pft_i)],
param_val_dict['cfrtcn_1_{}'.format(pft_i)],
param_val_dict['cfrtcn_2_{}'.format(pft_i)],
temp_val_dict['fracrc_{}'.format(pft_i)])
# final potential production and root:shoot ratio accounting for
# impacts of grazing
calc_final_tgprod_rtsh(
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['fracrc_{}'.format(pft_i)],
month_reg['flgrem_{}'.format(pft_i)],
param_val_dict['grzeff_{}'.format(pft_i)],
param_val_dict['gremb_{}'.format(pft_i)],
month_reg['tgprod_{}'.format(pft_i)],
month_reg['rtsh_{}'.format(pft_i)])
# clean up temporary files
shutil.rmtree(temp_dir)
def _snow(
site_index_path, site_param_table, precip_path, tave_path,
max_temp_path, min_temp_path, prev_snow_path, prev_snlq_path,
current_month, snowmelt_path, snow_path, snlq_path,
inputs_after_snow_path, pet_rem_path):
"""Account for precipitation as snow and snowmelt from snowpack.
Determine whether precipitation falls as snow. Track the fate of
new and existing snowpack including evaporation and melting. Track the
    remaining snowpack and liquid in snow and potential
evapotranspiration remaining after evaporation of snow. Snowcent.f
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
precip_path (string): path to raster containing precipitation for the
current month
tave_path (string): path to raster containing average temperature for
the current month
max_temp_path (string): path to raster containing maximum temperature
for the current month
min_temp_path (string): path to raster containing minimum temperature
for the current month
prev_snow_path (string): path to raster containing current snowpack
prev_snlq_path (string): path to raster containing current liquid in
snow
        current_month (int): current month of the year, such that
            current_month=1 indicates January
        snowmelt_path (string): path to raster to contain snowmelt
snow_path (string): path to raster to contain modified snowpack
snlq_path (string): path to raster to contain modified liquid in snow
inputs_after_snow_path (string): path to raster containing water inputs
to the system after accounting for snow
pet_rem_path (string): path to raster containing potential
evapotranspiration remaining after any evaporation of snow
Side effects:
creates the raster indicated by `snowmelt_path`
creates the raster indicated by `snow_path`
creates the raster indicated by `snlq_path`
creates the raster indicated by `inputs_after_snow_path`
creates the raster indicated by `pet_rem_path`
Returns:
None
"""
def calc_snow_moisture(return_type):
"""Calculate change in snow, pet, snow liquid, and moisture inputs.
Record changes in snowpack, liquid in snow, potential
evapotranspiration energy, and liquid draining into soil from snow.
Parameters:
return_type (string): flag indicating whether modified snowpack,
modified liquid in snow, modified potential evapotranspiration,
or soil moisture inputs after snow should be returned
Returns:
the function `_calc_snow_moisture`
"""
def _calc_snow_moisture(
tave, precip, snow, snlq, pet, tmelt_1, tmelt_2, shwave):
"""Calculate the fate of moisture from snow.
Calculate new snowfall or rain on snow. Calculate direct
evaporation of snow and consumption of potential
evapotranspiration energy. Calculate snowmelt and liquid draining
from snow into the soil.
Parameters:
tave (numpy.ndarray): derived, average temperature
precip (numpy.ndarray): input, precipitation for this month
snow (numpy.ndarray): derived, existing snowpack prior to new
snowfall
snlq (numpy.ndarray): derived, existing liquid in snowpack
pet (numpy.ndarray): derived, potential evapotranspiration
tmelt_1 (numpy.ndarray): parameter, minimum temperature above
which snow will melt
tmelt_2 (numpy.ndarray): parameter, ratio between degrees above
the minimum temperature and cm of snow that will melt
shwave (numpy.ndarray): derived, shortwave radiation outside
the atmosphere
Returns:
snowmelt if return_type is 'snowmelt'
snow_revised if return_type is 'snow'
snlq_revised if return_type is 'snlq'
pet_revised if return_type is 'pet'
inputs_after_snow if return_type is 'inputs_after_snow'
"""
valid_mask = (
(tave != _IC_NODATA) &
(~numpy.isclose(precip, precip_nodata)) &
(~numpy.isclose(snow, _SV_NODATA)) &
(~numpy.isclose(snlq, _SV_NODATA)) &
(pet != _TARGET_NODATA) &
(tmelt_1 != _IC_NODATA) &
(tmelt_2 != _IC_NODATA) &
(shwave != _TARGET_NODATA))
inputs_after_snow = numpy.empty(precip.shape, dtype=numpy.float32)
inputs_after_snow[:] = _TARGET_NODATA
inputs_after_snow[valid_mask] = precip[valid_mask]
snowfall_mask = (valid_mask & (tave <= 0))
snow[snowfall_mask] = (snow[snowfall_mask] + precip[snowfall_mask])
inputs_after_snow[snowfall_mask] = 0.
rain_on_snow_mask = (
(valid_mask) &
(tave > 0) &
(snow > 0))
snlq[rain_on_snow_mask] = (
snlq[rain_on_snow_mask] + precip[rain_on_snow_mask])
inputs_after_snow[rain_on_snow_mask] = 0.
snowtot = numpy.zeros(snow.shape, dtype=numpy.float32)
snowtot[valid_mask] = numpy.maximum(
snow[valid_mask] + snlq[valid_mask], 0)
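            # snow and liquid in snow evaporate directly, consuming up to 87%
            # of the remaining potential evapotranspiration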
evap_mask = (valid_mask & (snowtot > 0.))
evsnow = numpy.zeros(snow.shape, dtype=numpy.float32)
evsnow[evap_mask] = numpy.minimum(
snowtot[evap_mask], pet[evap_mask] * 0.87)
snow_revised = numpy.empty(snow.shape, dtype=numpy.float32)
snow_revised[:] = _TARGET_NODATA
snow_revised[valid_mask] = snow[valid_mask]
snow_revised[evap_mask] = numpy.maximum(
snow[evap_mask] - evsnow[evap_mask] *
(snow[evap_mask] / snowtot[evap_mask]), 0.)
            snlq_revised = numpy.zeros(snow.shape, dtype=numpy.float32)
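
# A minimal, self-contained sketch (separate from the model code above) of the
# masking pattern used by `_calc_snow_moisture`: precipitation is added to the
# snowpack where average temperature is at or below freezing, and to the liquid
# in the snow where rain falls on an existing snowpack; everything else passes
# straight through to the soil. Nodata handling, sublimation, and melt are
# omitted here, and the toy values are illustrative only.
import numpy

tave = numpy.array([-5., 2., 3.], dtype=numpy.float32)    # average temperature
precip = numpy.array([1., 1., 1.], dtype=numpy.float32)   # precipitation
snow = numpy.array([0., 4., 0.], dtype=numpy.float32)     # existing snowpack
snlq = numpy.array([0., 0.5, 0.], dtype=numpy.float32)    # liquid in snowpack

inputs_after_snow = precip.copy()
snowfall_mask = tave <= 0
snow[snowfall_mask] += precip[snowfall_mask]
inputs_after_snow[snowfall_mask] = 0.

rain_on_snow_mask = (tave > 0) & (snow > 0)
snlq[rain_on_snow_mask] += precip[rain_on_snow_mask]
inputs_after_snow[rain_on_snow_mask] = 0.

# pixel 0 snows, pixel 1 is rain on snow, pixel 2 drains directly to the soil
assert numpy.allclose(snow, [1., 4., 0.])
assert numpy.allclose(snlq, [0., 1.5, 0.])
assert numpy.allclose(inputs_after_snow, [0., 0., 1.])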
import numpy as np
from scipy.sparse import csr_matrix, identity, kron
from scipy.sparse.linalg import eigs, eigsh
import itertools
from scipy.linalg import block_diag, eig, expm, eigh
from scipy.sparse import save_npz, load_npz, csr_matrix, csc_matrix
import yaml
import copy
import warnings
import os
class ManualL:
def __init__(self,L,mu_ket_up,*,output='uf2',savedir=''):
self.L = L
self.mu_ket_up = mu_ket_up
self.output = output
if savedir=='':
savedir = os.getcwd()
self.base_path = os.path.join(savedir,output)
os.makedirs(self.base_path,exist_ok=True)
if output == 'uf2':
self.eigfun(self.L)
self.save_eigensystem(self.base_path)
if len(mu_ket_up.shape) == 2:
self.save_RWA_mu(self.base_path)
elif len(mu_ket_up.shape) == 3:
self.save_RWA_mu3D(self.base_path)
elif output == 'RKE':
self.save_L(self.base_path)
self.save_RWA_mu_site_basis(self.base_path)
def save_L(self,dirname):
save_npz(os.path.join(dirname,'L.npz'),csr_matrix(self.L))
def eigfun(self,L,*,check_eigenvectors = True,invert = True,populations_only = False):
eigvals, eigvecs = np.linalg.eig(L)
eigvals = np.round(eigvals,12)
sort_indices = eigvals.argsort()
eigvals.sort()
eigvecs = eigvecs[:,sort_indices]
for i in range(eigvals.size):
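            # fix an overall sign: flip each right eigenvector so that its
            # largest-magnitude component has a positive real part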
max_index = np.argmax(np.abs(eigvecs[:,i]))
if np.real(eigvecs[max_index,i]) < 0:
eigvecs[:,i] *= -1
if eigvals[i] == 0:
# eigenvalues of 0 correspond to thermal distributions,
# which should have unit trace in the Hamiltonian space
if populations_only:
trace_norm = eigvecs[:,i].sum()
eigvecs[:,i] = eigvecs[:,i] / trace_norm
else:
shape = int(np.sqrt(eigvals.size))
trace_norm = eigvecs[:,i].reshape(shape,shape).trace()
eigvecs[:,i] = eigvecs[:,i] / trace_norm
if invert:
eigvecs_left = np.linalg.pinv(eigvecs)
else:
eigvals_left, eigvecs_left = np.linalg.eig(L.T)
eigvals_left = np.round(eigvals_left,12)
sort_indices_left = eigvals_left.argsort()
eigvals_left.sort()
eigvecs_left = eigvecs_left[:,sort_indices_left]
eigvecs_left = eigvecs_left.T
for i in range(eigvals_left.size):
norm = np.dot(eigvecs_left[i,:],eigvecs[:,i])
eigvecs_left[i,:] *= 1/norm
if check_eigenvectors:
LV = L.dot(eigvecs)
D = eigvecs_left.dot(LV)
if np.allclose(D,np.diag(eigvals),rtol=1E-10,atol=1E-10):
pass
else:
warnings.warn('Using eigenvectors to diagonalize Liouvillian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))
self.eigenvalues = eigvals
self.eigenvectors = {'left':eigvecs_left,'right':eigvecs}
return eigvals, eigvecs, eigvecs_left
def save_eigensystem(self,dirname):
np.savez(os.path.join(dirname,'right_eigenvectors.npz'),all_manifolds = self.eigenvectors['right'])
np.savez(os.path.join(dirname,'left_eigenvectors.npz'),all_manifolds = self.eigenvectors['left'])
np.savez(os.path.join(dirname,'eigenvalues.npz'),all_manifolds = self.eigenvalues)
def mu3D_eigentransform(self,mu):
evl = self.eigenvectors['left']
ev = self.eigenvectors['right']
mu_t = np.zeros(mu.shape,dtype='complex')
for i in range(3):
mu_t[:,:,i] = np.dot(np.dot(evl,mu[:,:,i]),ev)
return mu_t
def mask_mu3D(self,mu):
mu_mask_tol = 10
mu_mask = np.zeros(mu.shape[:2],dtype='bool')
mu_abs = np.sqrt(np.sum(np.abs(mu)**2,axis=2))
mu_mask[:,:] = np.round(mu_abs,mu_mask_tol)[:,:]
mu_masked = mu * mu_mask[:,:,np.newaxis]
return mu_mask, mu_masked
def save_RWA_mu3D(self,dirname,*,mask=True):
H_size = self.mu_ket_up.shape[0]
mu_dtype= self.mu_ket_up.dtype
L_size = H_size**2
II = np.eye(H_size)
mu_ket_up = np.zeros((L_size,L_size,3),dtype=mu_dtype)
mu_ket_down = np.zeros((L_size,L_size,3),dtype=mu_dtype)
mu_bra_up = np.zeros((L_size,L_size,3),dtype=mu_dtype)
mu_bra_down = np.zeros((L_size,L_size,3),dtype=mu_dtype)
for i in range(3):
mu_ket_up[:,:,i] = np.kron(self.mu_ket_up[:,:,i],II.T)
mu_ket_down[:,:,i] = np.kron(np.conjugate(self.mu_ket_up[:,:,i].T),II.T)
mu_bra_up[:,:,i] = np.kron(II,np.conjugate(self.mu_ket_up[:,:,i]))
mu_bra_down[:,:,i] = np.kron(II,self.mu_ket_up[:,:,i].T)
mu_ket_up_t = self.mu3D_eigentransform(mu_ket_up)
mu_ket_down_t = self.mu3D_eigentransform(mu_ket_down)
mu_bra_up_t = self.mu3D_eigentransform(mu_bra_up)
mu_bra_down_t = self.mu3D_eigentransform(mu_bra_down)
np.savez(os.path.join(dirname,'mu.npz'),ket_up=mu_ket_up_t,bra_up=mu_bra_up_t,
ket_down=mu_ket_down_t,bra_down=mu_bra_down_t)
if mask:
ket_up_t_mask, mu_ket_up_t_masked = self.mask_mu3D(mu_ket_up_t)
ket_down_t_mask, mu_ket_down_t_masked = self.mask_mu3D(mu_ket_down_t)
bra_up_t_mask, mu_bra_up_t_masked = self.mask_mu3D(mu_bra_up_t)
bra_down_t_mask, mu_bra_down_t_masked = self.mask_mu3D(mu_bra_down_t)
np.savez(os.path.join(dirname,'mu_boolean.npz'),ket_up=ket_up_t_mask,bra_up=bra_up_t_mask,
ket_down=ket_down_t_mask,bra_down=bra_down_t_mask)
np.savez(os.path.join(dirname,'mu_pruned.npz'),ket_up=mu_ket_up_t_masked,
bra_up=mu_bra_up_t_masked,ket_down=mu_ket_down_t_masked,
bra_down=mu_bra_down_t_masked)
def save_RWA_mu(self,dirname,*,mask=True):
evl = self.eigenvectors['left']
ev = self.eigenvectors['right']
II = np.eye(self.mu_ket_up.shape[0])
mu_ket_up = np.kron(self.mu_ket_up,II.T)
mu_ket_down = np.kron(np.conjugate(self.mu_ket_up.T),II.T)
mu_bra_up = np.kron(II,np.conjugate(self.mu_ket_up))
mu_bra_down = np.kron(II,self.mu_ket_up.T)
mu_mask_tol = 10
mu_ket_up_t = np.dot(np.dot(evl,mu_ket_up),ev)
mu_ket_up_3d = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_ket_up_3d[:,:,0] = mu_ket_up_t
mu_bra_up_t = np.dot(np.dot(evl,mu_bra_up),ev)
mu_bra_up_3d = np.zeros((mu_bra_up_t.shape[0],mu_bra_up_t.shape[0],3),dtype='complex')
mu_bra_up_3d[:,:,0] = mu_bra_up_t
mu_ket_down_t = np.dot(np.dot(evl,mu_ket_down),ev)
mu_ket_down_3d = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_ket_down_3d[:,:,0] = mu_ket_down_t
mu_bra_down_t = np.dot(np.dot(evl,mu_bra_down),ev)
mu_bra_down_3d = np.zeros((mu_bra_down_t.shape[0],mu_bra_down_t.shape[0],3),dtype='complex')
mu_bra_down_3d[:,:,0] = mu_bra_down_t
if mask:
ket_up_mask = np.zeros(mu_ket_up_t.shape,dtype='bool')
ket_up_mask[:,:] = np.round(mu_ket_up_t,mu_mask_tol)[:,:]
mu_ket_up_t_masked = mu_ket_up_t * ket_up_mask
mu_ket_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_ket_up_3d_masked[:,:,0] = mu_ket_up_t_masked
bra_up_mask = np.zeros(mu_bra_up_t.shape,dtype='bool')
bra_up_mask[:,:] = np.round(mu_bra_up_t,mu_mask_tol)[:,:]
mu_bra_up_t_masked = mu_bra_up_t * bra_up_mask
mu_bra_up_3d_masked = np.zeros((mu_ket_up_t.shape[0],mu_ket_up_t.shape[0],3),dtype='complex')
mu_bra_up_3d_masked[:,:,0] = mu_bra_up_t_masked
ket_down_mask = np.zeros(mu_ket_down_t.shape,dtype='bool')
ket_down_mask[:,:] = np.round(mu_ket_down_t,mu_mask_tol)[:,:]
mu_ket_down_t_masked = mu_ket_down_t * ket_down_mask
mu_ket_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
mu_ket_down_3d_masked[:,:,0] = mu_ket_down_t_masked
bra_down_mask = np.zeros(mu_bra_down_t.shape,dtype='bool')
bra_down_mask[:,:] = np.round(mu_bra_down_t,mu_mask_tol)[:,:]
mu_bra_down_t_masked = mu_bra_down_t * bra_down_mask
            mu_bra_down_3d_masked = np.zeros((mu_ket_down_t.shape[0],mu_ket_down_t.shape[0],3),dtype='complex')
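
# A minimal sketch (independent of the ManualL class above) of the left/right
# eigendecomposition pattern used in eigfun: diagonalize a small non-symmetric
# matrix, take the left eigenvectors from the pseudoinverse of the right
# eigenvectors, and check that evl @ L @ ev is diagonal. The toy matrix is an
# assumption for illustration only.
import numpy as np

L_toy = np.array([[-1.0, 0.2],
                  [0.1, -0.5]])
eigvals, ev = np.linalg.eig(L_toy)
evl = np.linalg.pinv(ev)
D = evl @ L_toy @ ev
assert np.allclose(D, np.diag(eigvals), atol=1e-10)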
"""
Programming Assignment 2
To be submitted via canvas, just as Programming Assignment 1
This program builds a two-layer neural network for the Iris dataset.
The first layer is a relu layer with 10 units, and the second one is
a softmax layer. The network structure is specified in the "train" function.
The parameters are learned using SGD. The forward propagation and backward
propagation are carried out in the "compute_neural_net_loss" function. The code
for the propagations has been deleted. Your task is to fill in the missing code.
"""
# In this exercise, we are going to work with a two-layer neural network
# first layer is a relu layer with 10 units, and second one is a softmax layer.
# randomly initialize parameters
import numpy as np
import os, sys
import math
# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TEST = "iris_test.csv"
def get_data():
# Load datasets.
train_data = np.genfromtxt(IRIS_TRAINING,
skip_header=1,
dtype=float,
delimiter=',')
test_data = np.genfromtxt(IRIS_TEST,
skip_header=1,
dtype=float,
delimiter=',')
train_x = train_data[:, :4]
train_y = train_data[:, 4].astype(np.int64)
test_x = test_data[:, :4]
test_y = test_data[:, 4].astype(np.int64)
return train_x, train_y, test_x, test_y
def compute_neural_net_loss(params, X, y, reg=0.0):
"""
Neural network loss function.
Inputs:
- params: dictionary of parameters, including "W1", "b1", "W2", "b2"
- X: N x D array of training data. Each row is a D-dimensional point.
- y: 1-d array of shape (N, ) for the training labels.
Returns:
- loss: the softmax loss with regularization
- grads: dictionary of gradients for the parameters in params
"""
# Unpack variables from the params dictionary
assert y is not None
W1, b1 = params['W1'], params['b1']
W2, b2 = params['W2'], params['b2']
N, D = X.shape
loss = 0.0
grads = {}
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. So that your results match ours, multiply the #
# regularization loss by 0.5 #
#############################################################################
z = np.dot(X, W1) + b1
h = np.maximum(z, 0)
scores = np.dot(h, W2) + b2
out = np.exp(scores)
out /= np.sum(out, axis=1, keepdims=True).reshape(N, 1)
# compute softmax loss
loss -= np.sum(np.log(out[np.arange(N), y]))
loss /= N
loss += 0.5 * reg * (np.sum(W1**2) + np.sum(W2**2))
#############################################################################
# END OF YOUR CODE #
#############################################################################
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# back propagation
dout = np.copy(out) # (N, C)
dout[np.arange(N), y] -= 1
dh = np.dot(dout, W2.T)
dh[h <= 0] = 0
# compute gradient for parameters
grads['W2'] = np.dot(h.T, dout) / N # (H, C)
db2 = np.sum(dout, axis=0) / N # (C,)
grads['W1'] = np.dot(X.T, dh) / N # (D, H)
    db1 = np.sum(dh, axis=0)
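
# A small standalone sketch (separate from the assignment code above) of the
# softmax cross-entropy gradient used in compute_neural_net_loss: for scores s
# and true label y, dL/ds = softmax(s) - one_hot(y), which the backward pass
# above averages over the batch. A central finite difference confirms it on a
# single illustrative sample.
import numpy as np

scores = np.array([[2.0, 1.0, 0.1]])    # one sample, three classes
y = np.array([0])

def xent(s):
    p = np.exp(s) / np.sum(np.exp(s), axis=1, keepdims=True)
    return -np.log(p[np.arange(s.shape[0]), y]).mean()

probs = np.exp(scores) / np.sum(np.exp(scores), axis=1, keepdims=True)
dscores = probs.copy()
dscores[np.arange(1), y] -= 1           # analytic gradient: softmax - one_hot

eps = 1e-6
s_plus = scores.copy();  s_plus[0, 0] += eps
s_minus = scores.copy(); s_minus[0, 0] -= eps
num_grad = (xent(s_plus) - xent(s_minus)) / (2 * eps)
assert abs(num_grad - dscores[0, 0]) < 1e-6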
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
    """Return the set of all positive integer factors of n."""
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
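    # ties may be ordered differently by NumCpp and NumPy, so compare the
    # reordered data column by column rather than the raw index arrays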
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
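# The numpy reference cast below drops the imaginary parts and raises a
# ComplexWarning, so the warning is silenced for just this assertion and the
# filter is popped again afterwards.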
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
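# Compares NumCpp.average with np.average for real and complex arrays over
# Axis.NONE, Axis.ROW (numpy axis=0) and Axis.COL (numpy axis=1), rounding
# to 9 decimals to absorb floating-point differences.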
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
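# Weighted averages: full-shape weights for Axis.NONE, and 1-D weight vectors
# whose length matches the reduced axis (rows for Axis.ROW, columns for
# Axis.COL).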
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
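# binaryRepr should match np.binary_repr padded to the full 64-bit width.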
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
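# bincount with the default and an explicit minlength, plus the weighted
# variant against np.bincount(..., weights=...).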
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
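# Compared against scipy's center_of_mass; 'meas' is assumed to be
# scipy.ndimage.measurements imported near the top of the file. Axis.NONE uses
# the whole array, Axis.ROW works per column and Axis.COL per row.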
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
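# Scalar and array clip for both real and complex inputs.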
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
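# Membership tests: Axis.NONE checks the whole array, Axis.COL checks each row
# and Axis.ROW checks each column (hence the iteration over data.T).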
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
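# Cross products: pairs of 2-D vectors yield scalars, pairs of 3-D vectors
# yield vectors; both cases are exercised for Axis.NONE, Axis.ROW and Axis.COL.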
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
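# NumCpp.Slice(0, 100, 4) and Python's slice(0, 99, 4) select the same indices
# (0, 4, ..., 96) over 100 elements, so the two deletions should agree.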
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
def test_diagonal():
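# Diagonals with a random non-negative offset; the Axis.COL case swaps
# axis1/axis2 in the numpy reference.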
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
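# First differences over the flattened array (Axis.NONE), down the rows
# (Axis.ROW, numpy axis=0) and across the columns (Axis.COL, numpy axis=1).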
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
def test_divide():
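# Division against numpy for array/array, array/scalar and scalar/array in
# real, complex and mixed combinations; zero divisors are replaced or redrawn
# to avoid dividing by zero.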
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
def test_dot():
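# For 1-D inputs dot reduces to an inner product (compared against np.dot(data1, data2.T)); 2-D inputs check the full matrix product.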
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_find():
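# Builds a boolean mask of elements above the mean and checks that find matches np.nonzero on the flattened mask.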
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
def test_findN():
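# findN returns only the first n flattened indices satisfying the mask (here n = 8).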
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
def test_fix():
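# np.fix truncates toward zero; checks both the scalar and array overloads.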
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
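# NumCpp.flatten returns a 1 x size row vector, hence the comparison against np.resize(data, [1, data.size]).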
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
def test_fromfile():
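# Round-trips an array through dump/fromfile: first as a raw binary file, then as newline-delimited text written with tofile.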
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShapeComplex(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
def test_gcd():
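# gcd support requires either Boost or an STL providing std::gcd; the array overload additionally requires Boost.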
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
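# Gradient along Axis.ROW/COL matches numpy axis=0/1; Axis.NONE operates on the flattened array.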
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
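# The first overload returns (counts, bin edges) for a given bin count; the second takes explicit bin edges and returns counts only.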
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
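# Builds a sine curve, resamples it on a finer grid, and compares NumCpp.interp with np.interp to 9 decimal places.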
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(), np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
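# data2 differs from data1 by ~1e-5 noise, so with these tolerances some elements compare close and others do not.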
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
def test_lcm():
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
def test_ldexp():
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
def test_left_shift():
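# Shifts on uint32 values may overflow; both NumCpp and np.left_shift wrap modulo 2**32, so the results still agree.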
shapeInput = np.random.randint(20, 100, [2, ])
bitsToshift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.left_shift(cArray, bitsToshift).getNumpyArray(),
np.left_shift(data, bitsToshift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
def test_matmul():
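# Covers real*real, real*complex, complex*complex, and complex*real operand combinations.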
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArray(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
####################################################################################
def test_max():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
####################################################################################
def test_maximum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
####################################################################################
def test_mean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
####################################################################################
def test_median():
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.median(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() == np.median(data, axis=None).item()
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.median(data, axis=0))
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.median(data, axis=1))
####################################################################################
def test_meshgrid():
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataI = np.arange(start, end, step)
iSlice = NumCpp.Slice(start, end, step)
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataJ = np.arange(start, end, step)
jSlice = NumCpp.Slice(start, end, step)
iMesh, jMesh = np.meshgrid(dataI, dataJ)
iMeshC, jMeshC = NumCpp.meshgrid(iSlice, jSlice)
assert np.array_equal(iMeshC.getNumpyArray(), iMesh)
assert np.array_equal(jMeshC.getNumpyArray(), jMesh)
####################################################################################
def test_min():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
####################################################################################
def test_minimum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
####################################################################################
def test_mod():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.mod(cArray1, cArray2).getNumpyArray(), np.mod(data1, data2))
####################################################################################
def test_multiply():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
####################################################################################
def test_nan_to_num():
shapeInput = np.random.randint(50, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.size(), ]).astype(np.double)
nan_idx = np.random.choice(range(data.size), 10, replace=False)
pos_inf_idx = np.random.choice(range(data.size), 10, replace=False)
neg_inf_idx = np.random.choice(range(data.size), 10, replace=False)
data[nan_idx] = np.nan
data[pos_inf_idx] = np.inf
data[neg_inf_idx] = -np.inf
data = data.reshape(shapeInput)
cArray.setArray(data)
nan_replace = float(np.random.randint(100))
pos_inf_replace = float(np.random.randint(100))
neg_inf_replace = float(np.random.randint(100))
assert np.array_equal(NumCpp.nan_to_num(cArray, nan_replace, pos_inf_replace, neg_inf_replace),
np.nan_to_num(data, nan=nan_replace, posinf=pos_inf_replace, neginf=neg_inf_replace))
####################################################################################
def test_nanargmax():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmax(cArray, NumCpp.Axis.NONE).item() == np.nanargmax(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmax(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmax(data, axis=1))
####################################################################################
def test_nanargmin():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmin(cArray, NumCpp.Axis.NONE).item() == np.nanargmin(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmin(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmin(data, axis=1))
####################################################################################
def test_nancumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumprod(data, axis=None))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    data = np.random.randint(1, 4, [shape.rows, shape.cols])
import sys
import numpy as np
import pandas as pd
from optparse import OptionParser
import os
from scipy.stats import entropy
from scipy import signal
import scipy.stats as spstats
import fnmatch
from datetime import datetime
from scipy.stats import skew
from scipy.stats import kurtosis
from scipy.stats import t
from scipy.optimize import fsolve
import scipy.special as sc
# Extracts aggregate features per run from raw eye tracking and oculomotor event data, and
# builds a single feature matrix for use as input to train and validate a predictive model.
# If the feature matrix file already exists from a prior run of getFeatureMatrix(), you can
# save time by specifying useExisting=True to load it directly from the file rather than
# recomputing it from scratch.
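# Editor's usage sketch (not part of the original script; paths below are hypothetical),
# following the commented-out option-parser example further down:
#   featMatDF = getFeatureMatrix("/data/eyetracking/raw", "/data/eyetracking/featmat.csv", useExisting=True)
#   print(featMatDF.shape)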
# Research was sponsored by the United States Air Force Research Laboratory and the
# United States Air Force Artificial Intelligence Accelerator and was accomplished
# under Cooperative Agreement Number FA8750-19-2-1000. The views and conclusions
# contained in this document are those of the authors and should not be interpreted
# as representing the official policies, either expressed or implied, of the United
# States Air Force or the U.S. Government. The U.S. Government is authorized to
# reproduce and distribute reprints for Government purposes notwithstanding any
# copyright notation herein.
# def main():
# parser = OptionParser()
# parser.add_option('-d', '--dataDir', action="store", dest="dataDir", default=None, help="The top level data directory containing all the raw signal files for each subject.")
# parser.add_option('-o', '--outFilePath', action="store", dest="outFilePath", default=None, help="File to write full feature matrix.");
# (options, args) = parser.parse_args()
# getFeatureMatrix(options.dataDir, options.outFilePath);
def getFeatureMatrix(dataDir, filePath, useExisting):
if useExisting:
if os.path.exists(filePath):
print("Found precomputed feature matrix.")
featMatDF = pd.read_csv(filePath)
print("Loaded into a dataFrame.")
return featMatDF
else:
print(
"Cannot use existing feature matrix because specified file was not found. Recomputing it from scratch."
)
subjDirs = [f.path for f in os.scandir(dataDir) if f.is_dir()]
dfHeader = [
"Subject",
"Session",
"Run",
"OverallGazeEntropyLX",
"psdMaxLX",
"psdFreqOfMaxLX",
"OverallGazeEntropyLY",
"psdMaxLY",
"psdFreqOfMaxLY",
"OverallGazeEntropyLZ",
"psdMaxLZ",
"psdFreqOfMaxLZ",
"OverallGazeEntropyRX",
"psdMaxRX",
"psdFreqOfMaxRX",
"OverallGazeEntropyRY",
"psdMaxRY",
"psdFreqOfMaxRY",
"OverallGazeEntropyRZ",
"psdMaxRZ",
"psdFreqOfMaxRZ",
"EyesClosedFractionL",
"EyesClosedFractionR",
"PupilDiamMeanL",
"PupilDiamStdevL",
"PupilDiamSkewL",
"PupilDiamKurtL",
"PupilDiamMeanR",
"PupilDiamStdevR",
"PupilDiamSkewR",
"PupilDiamKurtR",
"FixDurMean",
"FixDurStdev",
"FixDurSkew",
"FixDurKurt",
"FixDensityMean",
"FixDensityStdev",
"FixDensitySkew",
"FixDensityKurt",
"SacMainSeqMean",
"SacMainSeqStdev",
"SacPeakVelMean",
"SacPeakVelStdev",
]
# walks through the directory structure of the raw data
featMat = []
ctr = 1
for subjd in subjDirs:
sessDirs = [f.path for f in os.scandir(subjd) if f.is_dir()]
print(
"Processing subject "
+ str(ctr)
+ " of "
+ str(len(subjDirs))
+ ": "
+ os.path.basename(subjd)
)
ctr = ctr + 1
for sessd in sessDirs:
runDirs = [f.path for f in os.scandir(sessd) if f.is_dir()]
for rund in runDirs:
dataFiles = [f.path for f in os.scandir(rund) if f.is_file()]
toks = rund.split(os.path.sep)
subj = toks[-3]
sess = toks[-2]
run = toks[-1]
rawEyeFile = fnmatch.filter(dataFiles, "*lslhtcviveeye*.csv")
dfraw = pd.read_csv(rawEyeFile[0])
timeStr = dfraw["time_s"]
datalen = len(timeStr)
if datalen < 10:
continue
# if there is even one corrupted date-time string, skip this whole run.
try:
timesMillis = [convertTimeStrToMillis(f) for f in timeStr]
except ValueError:
print(
"corrupted timestamp string, skipping run = "
+ run
+ ", subj = "
+ subj
+ ", sess = "
+ sess
)
continue
ocuEvtsFile = fnmatch.filter(dataFiles, "*_ocuevts_*.csv")
if len(ocuEvtsFile) < 1:
print(
"No oculomotor events file found for run "
+ run
+ ", subj = "
+ subj
+ ", sess = "
+ sess
)
continue
try:
dfocu = pd.read_csv(ocuEvtsFile[0])
except pd.errors.EmptyDataError:
print("Empty oculomotor events file. Skipping.")
continue
if dfocu.shape[0] < 10:
continue
gazeFeats = extractRawEyeFeats(dfraw, timesMillis)
ocuEvtFeats = extractOcuEvtFeats(dfraw, dfocu)
dfrow = [subj, sess, run]
dfrow = dfrow + gazeFeats + ocuEvtFeats
featMat.append(dfrow)
featMatDF = pd.DataFrame(featMat, columns=dfHeader)
if filePath != None:
print("Saving feature matrix to file.")
featMatDF.to_csv(filePath)
print("Processing complete. Returning feature matrix data frame.")
return featMatDF
def convertTimeStrToMillis(timeStr):
millisec = float(timeStr) * 1000
return millisec
def extractRawEyeFeats(df, timesMillis):
diffTimes = np.diff(timesMillis)
fs = np.mean(1.0 / diffTimes)
gazeFeatsLX = getSpectralFeatures(df["gaze_direction_l_x_mm"].values, fs)
gazeFeatsLY = getSpectralFeatures(df["gaze_direction_l_y_mm"].values, fs)
gazeFeatsLZ = getSpectralFeatures(df["gaze_direction_l_z_mm"].values, fs)
gazeFeatsRX = getSpectralFeatures(df["gaze_direction_r_x_mm"].values, fs)
gazeFeatsRY = getSpectralFeatures(df["gaze_direction_r_y_mm"].values, fs)
gazeFeatsRZ = getSpectralFeatures(df["gaze_direction_r_z_mm"].values, fs)
eyeClosedFracL = getFracTimeEyeClosed(df["eye_openness_l"].values, 0.5)
eyeClosedFracR = getFracTimeEyeClosed(df["eye_openness_r"].values, 0.5)
pupilFeatsL = getPupilFeatures(df["pupil_diameter_l_mm"].values)
pupilFeatsR = getPupilFeatures(df["pupil_diameter_r_mm"].values)
eyeFeatsAll = (
gazeFeatsLX
+ gazeFeatsLY
+ gazeFeatsLZ
+ gazeFeatsRX
+ gazeFeatsRY
+ gazeFeatsRZ
+ [eyeClosedFracL, eyeClosedFracR]
+ pupilFeatsL
+ pupilFeatsR
)
return eyeFeatsAll
def getFracTimeEyeClosed(eoSignal, thresh):
closedSamples = np.where(eoSignal <= thresh)
timeEyeClosed = np.max(closedSamples[0].shape)
fracTimeEyeClosed = timeEyeClosed / np.max(eoSignal.shape)
return fracTimeEyeClosed
def getPupilFeatures(pdSignal):
meanPD = np.mean(pdSignal)
stdevPD = np.std(pdSignal)
skewPD = skew(pdSignal)
kurtPD = kurtosis(pdSignal)
return [meanPD, stdevPD, skewPD, kurtPD]
# compute the overall spectral entropy of a continuous-valued signal, as well as the peak of the power spectral density.
def getSpectralFeatures(rawSignal, fs):
f, psd = signal.welch(rawSignal, fs)
# defaults to: hanning window, 256 samples per segment. Returns mean across segments.
psdMaxPower = max(psd)
psdMaxPowerIdx = np.argmax(psd)
psdFreqOfMax = f[psdMaxPowerIdx]
# Spectral Entropy is defined to be the Shannon Entropy of the power spectral density of the data.
# Step 1: Normalize the PSD by dividing it by the total PSD sum
normPSD = psd / sum(psd)
# Step 2: Calculate power spectral entropy
    overallEntropy = -np.sum(normPSD * np.log2(normPSD))  # Shannon entropy of the normalized PSD
return [overallEntropy, psdMaxPower, psdFreqOfMax]
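# Editor's sketch (not in the original module): quick check of getSpectralFeatures() on a
# synthetic sine wave with an assumed sampling rate; the PSD peak should fall near the sine
# frequency. Defined but never called, so importing this module has no side effects.
def _example_spectral_features():
    fs = 100.0                                # assumed sampling rate (samples per unit time)
    t = np.arange(0, 10, 1.0 / fs)
    sig = np.sin(2 * np.pi * 5.0 * t)         # sine at 5 cycles per unit time
    entropy_val, psd_max, freq_of_max = getSpectralFeatures(sig, fs)
    print(entropy_val, psd_max, freq_of_max)  # freq_of_max should be close to 5.0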
# Extract features pertaining to oculomotor events: fixations and saccades.
def extractOcuEvtFeats(dfRawEye, dfOcuEvt):
eoSignal = dfRawEye["eye_openness_l"].values
eyeOpenIdxs = np.where(eoSignal > 0.5)
eoIdxs = eyeOpenIdxs[0]
fixFeats = getFixationFeats(dfOcuEvt, dfRawEye, eoIdxs[:-2])
sacFeats = getSaccadeFeats(dfOcuEvt, dfRawEye, eoIdxs[:-2])
return fixFeats + sacFeats
# Computes aggregate statistics of fixation durations and densities across each run.
def getFixationFeats(dfOcuEvt, dfRawEye, eoIdxs):
fixSeq = dfOcuEvt["FixationSeq"].values
timeCol = dfOcuEvt["Timestamp"].values
gazeX = dfRawEye["gaze_direction_l_x_mm"].values
gazeY = dfRawEye["gaze_direction_l_y_mm"].values
gazeZ = dfRawEye["gaze_direction_l_z_mm"].values
fixEO = fixSeq[eoIdxs]
timeEO = timeCol[eoIdxs]
fixDurs = getFixDurations(fixEO, timeEO)
fixDens = getFixDensities(
fixEO, timeEO, gazeX[eoIdxs], gazeY[eoIdxs], gazeZ[eoIdxs]
)
meanFixDur = np.mean(fixDurs)
stdevFixDur = np.std(fixDurs)
skewFixDur = skew(fixDurs)
kurtFixDur = kurtosis(fixDurs)
meanFixDen = np.mean(fixDens)
stdevFixDen = np.std(fixDens)
skewFixDen = skew(fixDens)
kurtFixDen = kurtosis(fixDens)
return [
meanFixDur,
stdevFixDur,
skewFixDur,
kurtFixDur,
meanFixDen,
stdevFixDen,
skewFixDen,
kurtFixDen,
]
def getFixDurations(fixEO, timeEO):
fixDurs = []
uniqueFixNums = set(fixEO)
for fn in uniqueFixNums:
if fn < 0:
continue
curFixIdxsArr = np.where(fixEO == fn)
curFixIdxs = curFixIdxsArr[0]
curFixTimes = timeEO[curFixIdxs]
startTime = curFixTimes[0]
endTime = curFixTimes[-1]
curFixDur = endTime - startTime
fixDurs.append(curFixDur)
return fixDurs
def getFixDensities(fixEO, timeEO, gazeEOx, gazeEOy, gazeEOz):
fixDens = []
uniqueFixNums = set(fixEO)
for fn in uniqueFixNums:
if fn < 0:
continue
curFixIdxsArr = np.where(fixEO == fn)
curFixIdxs = curFixIdxsArr[0]
curFixTimes = timeEO[curFixIdxs]
startTime = curFixTimes[0]
endTime = curFixTimes[-1]
curFixDen = computeDispersion(
gazeEOx[curFixIdxs], gazeEOy[curFixIdxs], gazeEOz[curFixIdxs]
)
fixDens.append(curFixDen)
return fixDens
def computeDispersion(gazeX, gazeY, gazeZ):
centroidX = np.mean(gazeX)
centroidY = np.mean(gazeY)
centroidZ = np.mean(gazeZ)
offsetsXsq = np.square(gazeX - centroidX)
offsetsYsq = np.square(gazeY - centroidY)
offsetsZsq = np.square(gazeZ - centroidZ)
    dispersion = np.sqrt(
        np.mean(offsetsXsq) + np.mean(offsetsYsq) + np.mean(offsetsZsq)
    )
    return dispersion
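# Editor's sketch (not part of the original file): computeDispersion() on a small synthetic
# gaze cluster; tighter clusters should yield smaller dispersion values.
def _example_dispersion():
    gx = np.array([1.0, 1.1, 0.9, 1.0])
    gy = np.array([2.0, 2.1, 1.9, 2.0])
    gz = np.array([0.5, 0.5, 0.5, 0.5])
    print(computeDispersion(gx, gy, gz))  # RMS distance of the gaze samples from their centroid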
"""
Created on Sat May 23 18:17:31 2020
celebx = 'CC1=CC=C(C=C1)C2=CC(=NN2C3=CC=C(C=C3)S(=O)(=O)N)C(F)(F)F'
tiotixene = 'CN1CCN(CC1)CCC=C2C3=CC=CC=C3SC4=C2C=C(C=C4)S(=O)(=O)N(C)C'
Troglitazone = 'CC1=C(C2=C(CCC(O2)(C)COC3=CC=C(C=C3)CC4C(=O)NC(=O)S4)C(=C1O)C)C'
@author: akshat
"""
import selfies
import numpy as np
import random
from rdkit.Chem import MolFromSmiles as smi2mol
from rdkit.Chem import MolToSmiles as mol2smi
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.DataStructs.cDataStructs import TanimotoSimilarity
from selfies import encoder, decoder
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
def get_ECFP4(mol):
''' Return rdkit ECFP4 fingerprint object for mol
Parameters:
mol (rdkit.Chem.rdchem.Mol) : RdKit mol object
Returns:
rdkit ECFP4 fingerprint object for mol
'''
return AllChem.GetMorganFingerprint(mol, 2)
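# Editor's sketch (not in the original file): ECFP4 fingerprints are typically compared with
# Tanimoto similarity (TanimotoSimilarity is already imported above); identical molecules give
# a similarity of 1.0. The aspirin SMILES is used purely for illustration.
def _example_fingerprint_similarity():
    mol_a = smi2mol('CC(=O)OC1=CC=CC=C1C(=O)O')  # aspirin
    mol_b = smi2mol('CC(=O)OC1=CC=CC=C1C(=O)O')
    sim = TanimotoSimilarity(get_ECFP4(mol_a), get_ECFP4(mol_b))
    print(sim)  # 1.0 for identical structures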
def sanitize_smiles(smi):
'''Return a canonical smile representation of smi
Parameters:
smi (string) : smile string to be canonicalized
Returns:
mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)
smi_canon (string) : Canonicalized smile representation of smi (None if invalid smile string smi)
conversion_successful (bool): True/False to indicate if conversion was successful
'''
try:
mol = smi2mol(smi, sanitize=True)
smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)
return (mol, smi_canon, True)
except:
return (None, None, False)
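# Editor's sketch (not in the original file): sanitize_smiles() returns a canonical form plus a
# success flag, so invalid strings can be filtered without raising exceptions.
def _example_sanitize():
    mol, canon, ok = sanitize_smiles('C1=CC=CC=C1')   # benzene written in Kekule form
    print(ok, canon)                                  # expected: True, 'c1ccccc1'
    mol, canon, ok = sanitize_smiles('not_a_smiles')
    print(ok, canon)                                  # expected: False, None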
def mutate_selfie(selfie, max_molecules_len, write_fail_cases=False):
    '''Return a mutated SELFIES string (only one mutation on the selfie is performed)
    Mutations are attempted until a valid molecule is obtained
    Rules of mutation: With a 50% probability, either:
1. Add a random SELFIE character in the string
2. Replace a random SELFIE character with another
Parameters:
selfie (string) : SELFIE string to be mutated
max_molecules_len (int) : Mutations of SELFIE string are allowed up to this length
write_fail_cases (bool) : If true, failed mutations are recorded in "selfie_failure_cases.txt"
Returns:
selfie_mutated (string) : Mutated SELFIE string
smiles_canon (string) : canonical smile of mutated SELFIE string
'''
valid=False
fail_counter = 0
chars_selfie = get_selfie_chars(selfie)
while not valid:
fail_counter += 1
alphabet = list(selfies.get_semantic_robust_alphabet()) # 34 SELFIE characters
        choice_ls = [1, 2]  # 1=Insert; 2=Replace (delete is not used here)
random_choice = np.random.choice(choice_ls, 1)[0]
# Insert a character in a Random Location
if random_choice == 1:
random_index = np.random.randint(len(chars_selfie)+1)
            random_character = np.random.choice(alphabet, size=1)
"""
``CovEmu`` is an emulator for covariance matrices
(or any set of real-symmetric matrices).
"""
from copy import deepcopy
import george
import numpy as np
import scipy.optimize as op
from george.kernels import ExpSquaredKernel
from covariance_emulator.breakdown import (
breakdown_covariance,
breakdown_covariance_from_components,
)
class CovEmu(object):
"""
Generalized emulator for covariance matrices.
"""
def __init__(
self,
parameters,
covariance_matrices,
NPC_D=1,
NPC_L=1,
kernel_D=None,
kernel_lp=None,
):
Cs = np.atleast_3d(covariance_matrices)
self.N = Cs.shape[0]
parameters = np.atleast_2d(parameters).reshape(self.N, -1)
self.Npars = len(parameters[0])
assert len(parameters) == len(Cs), f"{parameters.shape} vs {Cs.shape}"
assert parameters.ndim == 2, parameters.ndim
assert Cs.ndim == 3, Cs.ndim
msg = "all covariances must have the same dimension"
assert all(len(C) == len(C[0]) for C in Cs), msg
# Save all attributes
self.NPC_D = NPC_D
self.NPC_L = NPC_L
self.covariance_matrices = Cs
self.parameters = parameters
# Create kernels for the emulator
metric_guess = np.std(self.parameters, 0)
self.kernel_D = kernel_D or 1.0 * ExpSquaredKernel(
metric=metric_guess, ndim=self.Npars
)
self.kernel_lp = kernel_lp or 1.0 * ExpSquaredKernel(
metric=metric_guess, ndim=self.Npars
)
# Call methods that start to build the emulator
self.breakdown_matrices()
self.create_training_data()
self.build_emulator()
self.train_emulator()
@classmethod
def from_Ds_Lprimes(cls, Ds, Lprimes):
"""
Reconstruct all covariance matrices from their individual parts
and assemble the emulator from those.
"""
pass
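    # Editor's usage sketch (not part of the original class): constructing the emulator from a
    # set of synthetic symmetric positive-definite matrices and 1-D parameters; all names below
    # are illustrative only.
    #   rng = np.random.default_rng(0)
    #   params = np.linspace(0.1, 1.0, 8)            # 8 training parameter values
    #   mats = []
    #   for _ in range(8):
    #       A = rng.normal(size=(5, 5))
    #       mats.append(A @ A.T + 5 * np.eye(5))     # SPD 5x5 "covariance" matrix
    #   emu = CovEmu(params, np.array(mats), NPC_D=2, NPC_L=2)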
def breakdown_matrices(self):
"""
Break down matrices into their constituent parts.
:returns:
None
"""
Cs = self.covariance_matrices
ND = len(self.covariance_matrices[0])
NLp = int(ND * (ND - 1) / 2)
Ds = np.zeros((self.N, ND))
Lprimes = np.zeros((self.N, NLp))
# Loop over matrices and break them down
for i in range(self.N):
breakdown = breakdown_covariance(Cs[i])
Ds[i] = breakdown["D"]
Lprimes[i] = breakdown["Lprime"]
continue
# Save the broken down data
self.Ds = Ds
self.ds_raw = np.log(Ds)
self.Lprimes = Lprimes
# Compute their first statistical moments
self.d_mean = np.mean(self.ds_raw, 0)
self.d_std = np.std(self.ds_raw, 0)
        self.Lprime_mean = np.mean(Lprimes, 0)
import mxnet as mx
import cv2 as cv
import numpy as np
import os
from PIL import Image
import math
from collections import namedtuple
from mxnet.contrib.onnx import import_model
import cityscapes_labels
def preprocess(im, rgb_mean):
# Convert to float32
test_img = im.astype(np.float32)
    # Extrapolate the image with a small border in order to obtain an accurate reshaped image after the DUC layer
test_shape = [im.shape[0],im.shape[1]]
cell_shapes = [math.ceil(l / 8)*8 for l in test_shape]
test_img = cv.copyMakeBorder(test_img, 0, max(0, int(cell_shapes[0]) - im.shape[0]), 0, max(0, int(cell_shapes[1]) - im.shape[1]), cv.BORDER_CONSTANT, value=rgb_mean)
test_img = np.transpose(test_img, (2, 0, 1))
    # subtract the rgb mean
for i in range(3):
test_img[i] -= rgb_mean[i]
test_img = np.expand_dims(test_img, axis=0)
return test_img
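# Editor's sketch (not part of the original script): applying preprocess() to a single image
# before inference; the file path is hypothetical, and the per-channel mean of the image itself
# is assumed as the rgb mean.
def _example_preprocess():
    im = cv.imread('city_street.jpg')  # hypothetical input image path
    rgb_mean = cv.mean(im)             # per-channel mean used for mean subtraction
    batch = preprocess(im, rgb_mean)
    print(batch.shape)                 # (1, 3, padded_height, padded_width)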
def get_palette():
# get train id to color mappings from file
trainId2colors = {label.trainId: label.color for label in cityscapes_labels.labels}
# prepare and return palette
palette = [0] * 256 * 3
for trainId in trainId2colors:
colors = trainId2colors[trainId]
if trainId == 255:
colors = (0, 0, 0)
for i in range(3):
palette[trainId * 3 + i] = colors[i]
return palette
def colorize(labels):
# generate colorized image from output labels and color palette
result_img = Image.fromarray(labels).convert('P')
result_img.putpalette(get_palette())
return np.array(result_img.convert('RGB'))
def predict(imgs, result_shape, mod, im):
# get input and output dimensions
result_height, result_width = result_shape
_, _, img_height, img_width = imgs.shape
# set downsampling rate
ds_rate = 8
# set cell width
cell_width = 2
# number of output label classes
label_num = 19
# Perform forward pass
batch = namedtuple('Batch', ['data'])
mod.forward(batch([imgs]),is_train=False)
labels = mod.get_outputs()[0].asnumpy().squeeze()
# re-arrange output
test_width = int((int(img_width) / ds_rate) * ds_rate)
test_height = int((int(img_height) / ds_rate) * ds_rate)
feat_width = int(test_width / ds_rate)
feat_height = int(test_height / ds_rate)
labels = labels.reshape((label_num, 4, 4, feat_height, feat_width))
labels = np.transpose(labels, (0, 3, 1, 4, 2))
labels = labels.reshape((label_num, int(test_height / cell_width), int(test_width / cell_width)))
labels = labels[:, :int(img_height / cell_width),:int(img_width / cell_width)]
labels = np.transpose(labels, [1, 2, 0])
labels = cv.resize(labels, (result_width, result_height), interpolation=cv.INTER_LINEAR)
    labels = np.transpose(labels, [2, 0, 1])
import random
import numpy as np
import matplotlib.pyplot as plt
phi = lambda vec: np.sin(np.product(vec))
h = lambda x: np.sqrt(1 + np.sin(np.product(x)) ** 2)
# Section 1.1.5 - Analytical Evaluation
# SubSection 1
def gradient_phi(vec: np.matrix):
grad_vec = np.matrix(np.array([[vec[1] * vec[2]], [vec[0] * vec[2]], [vec[0] * vec[1]]]), dtype=np.float128)
grad_vec = grad_vec.transpose()
return np.cos(np.product(vec)) * grad_vec
def hessian_phi(mat: np.matrix, vec: np.matrix):
nabla = np.zeros((3, 3))
nabla[0, 0] = (-((vec[1] * vec[2]) ** 2) * np.sin(np.product(vec)))
nabla[0, 1] = vec[2] * np.cos(np.product(vec)) - np.product(vec) * vec[2] * np.sin(np.product(vec))
nabla[0, 2] = vec[1] * np.cos(np.product(vec)) - np.product(vec) * vec[1] * np.sin(np.product(vec))
nabla[1, 0] = vec[2] * np.cos(np.product(vec)) - np.product(vec) * vec[2] * np.sin(np.product(vec))
nabla[1, 1] = (-((vec[0] * vec[2]) ** 2) * np.sin(np.product(vec)))
nabla[1, 2] = vec[0] * np.cos(np.product(vec)) - np.product(vec) * vec[0] * np.sin(np.product(vec))
nabla[2, 0] = vec[1] * np.cos(np.product(vec)) - np.product(vec) * vec[1] * np.sin(np.product(vec))
nabla[2, 1] = vec[0] * np.cos(np.product(vec)) - np.product(vec) * vec[0] * np.sin(np.product(vec))
nabla[2, 2] = (-((vec[0] * vec[1]) ** 2) * np.sin(np.product(vec)))
return mat.transpose() * nabla * mat
# SubSection 2
def gradient_h(vec: np.matrix):
return 0.5 * phi(vec) / ((1 + (phi(vec) ** 2)) ** 0.5) \
* gradient_phi(vec)
def hessian_h(vec: np.matrix):
# Calculating the 2nd derivative of h of phi
derivative = (np.cos(np.product(vec)) ** 2 -
(1 + np.sin(np.product(vec)) ** 2) * np.sin(np.product(vec)) ** 2) / \
(1 + np.sin(np.product(vec)) ** 2) ** 1.5
return derivative * hessian_phi(np.matrix(np.identity(3)), vec)
# Section 1.2.1 - Numerical Differentiation
def numerical_diff_gradient(func, vector: np.matrix, epsilon):
vec_len = vector.shape[0]
assert vector.shape[1] == 1
assert vec_len > 0
gradient = np.matrix(np.zeros((vec_len, 1)), dtype=np.float128)
for i in range(vec_len):
base_vector = np.matrix(np.zeros((vec_len, 1)), dtype=np.float128)
base_vector[i, 0] = 1
func_plus = func(vector + (epsilon * base_vector))
func_minus = func(vector - (epsilon * base_vector))
gradient[i, 0] = ((func_plus - func_minus) / (2 * epsilon))
return gradient
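# Editor's sketch (not part of the original assignment code): sanity check that the analytical
# gradient of phi from Section 1.1.5 matches the central-difference approximation above at an
# arbitrary test point.
def _example_check_gradient():
    x = np.array([0.3, 0.7, 1.1])                         # point at which to compare
    col = np.matrix(x.reshape(3, 1), dtype=np.float128)   # column form expected by the numeric routine
    numeric = numerical_diff_gradient(phi, col, epsilon=1e-6)
    analytic = gradient_phi(x)                            # 1x3 row matrix from the analytic formula
    diff = np.asarray(numeric).flatten() - np.asarray(analytic).flatten()
    print(np.max(np.abs(diff)))                           # should be very small (~1e-9 or less)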
def numerical_diff_hessian(vec: np.matrix, epsilon, hess_phi=False, hess_h=False,):
vec_len = vec.shape[0]
assert vec.shape[1] == 1
assert vec_len > 0
hessian = np.matrix(np.zeros((vec_len, vec_len)), dtype=np.float128)
for i in range(vec_len):
        base_vector = np.matrix(np.zeros((vec_len, 1)), dtype=np.float128)
import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
class DatasetSimple(Dataset):
"""
Args:
root (string): Root directory path.
frame_list_path (string): Frame list path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
sample_transform (callable, optional): A function/transform that takes
in the target and transforms it.
"""
def __init__(self, root, frame_list_path, transform=None, sample_transform=None):
self.root = root
self.frame_list = np.loadtxt(frame_list_path, skiprows=1, dtype=str)
self.transform = transform
self.sample_transform = sample_transform
def __len__(self):
return len(self.frame_list)
def __getitem__(self, index):
image_name = self.frame_list[index]
image_path = os.path.join(self.root, image_name)
with open(image_path, "rb") as f:
img = Image.open(f)
            img = img.convert("RGB")  # convert() returns a new image, so assign it back
if self.transform:
img = self.transform(img)
return img, image_name
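# Editor's sketch (not part of the original module): wiring DatasetSimple into a standard
# PyTorch DataLoader; the root path, frame list file, and torchvision transform are assumptions.
def _example_dataloader():
    from torch.utils.data import DataLoader
    import torchvision.transforms as T
    dataset = DatasetSimple(
        root='frames/',                      # hypothetical image directory
        frame_list_path='frame_list.txt',    # hypothetical frame list with a one-line header
        transform=T.Compose([T.Resize((256, 192)), T.ToTensor()]),
    )
    loader = DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2)
    for imgs, names in loader:
        print(imgs.shape, names[0])
        break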
def box_to_center_scale(box, model_image_width, model_image_height):
"""convert a box to center,scale information required for pose transformation
Parameters
----------
box : list | ndarray
model_image_width : int
model_image_height : int
Returns
-------
(numpy array, numpy array)
Two numpy arrays, coordinates for the center of the box and the scale of the box
"""
    center = np.zeros(2, dtype=np.float32)
# standard modules
import ast
import csv
import datetime
import logging
import os
import platform
import subprocess
import Tkinter
# 3rd party modules
import dateutil
import matplotlib.dates as mdt
import matplotlib.pyplot as plt
import numpy
import pylab
# PFP modules
import constants as c
import pfp_ck
import pfp_io
import pfp_ts
import pfp_utils
logger = logging.getLogger("pfp_log")
# functions for GapFillUsingSOLO
def GapFillUsingSOLO(cf, dsa, dsb):
'''
This is the "Run SOLO" GUI.
The SOLO GUI is displayed separately from the main OzFluxQC GUI.
It consists of text to display the start and end datetime of the file,
    two entry boxes for the start and end datetimes of the SOLO run, and
a button to run SOLO ("Run SOLO") and a button to exit the SOLO GUI
when we are done. On exit, the OzFluxQC main GUI continues and eventually
writes the gap filled data to file.
'''
# set the default return code
dsb.returncodes["solo"] = "normal"
if "solo" not in dir(dsb):
return
# local pointer to the datetime series
ldt = dsb.series["DateTime"]["Data"]
startdate = ldt[0]
enddate = ldt[-1]
solo_info = {"file_startdate":startdate.strftime("%Y-%m-%d %H:%M"),
"file_enddate":enddate.strftime("%Y-%m-%d %H:%M"),
"startdate":startdate.strftime("%Y-%m-%d %H:%M"),
"enddate":enddate.strftime("%Y-%m-%d %H:%M")}
# check to see if this is a batch or an interactive run
call_mode = pfp_utils.get_keyvaluefromcf(cf, ["Options"], "call_mode", default="interactive")
solo_info["call_mode"]= call_mode
if call_mode.lower()=="interactive":
solo_info["show_plots"] = True
if call_mode.lower()=="interactive":
# put up a plot of the data coverage at L4
gfSOLO_plotcoveragelines(dsb, solo_info)
# call the GapFillUsingSOLO GUI
gfSOLO_gui(dsa, dsb, solo_info)
else:
if "GUI" in cf:
if "SOLO" in cf["GUI"]:
gfSOLO_run_nogui(cf, dsa, dsb, solo_info)
else:
logger.warning(" No GUI sub-section found in Options section of control file")
gfSOLO_plotcoveragelines(dsb, solo_info)
gfSOLO_gui(dsa, dsb, solo_info)
else:
logger.warning(" No GUI sub-section found in Options section of control file")
gfSOLO_plotcoveragelines(dsb, solo_info)
gfSOLO_gui(dsa, dsb, solo_info)
def gfSOLO_gui(dsa, dsb, solo_info):
ldt = dsb.series["DateTime"]["Data"]
# set up the GUI
solo_gui = Tkinter.Toplevel()
solo_gui.wm_title("SOLO GUI (Fluxes)")
solo_gui.grid()
# top row
nrow = 0
solo_gui.nodesLabel = Tkinter.Label(solo_gui,text="Nodes")
solo_gui.nodesLabel.grid(row=nrow,column=0,columnspan=1,sticky="E")
solo_gui.nodesEntry = Tkinter.Entry(solo_gui,width=6)
solo_gui.nodesEntry.grid(row=nrow,column=1,columnspan=1,sticky="W")
solo_gui.nodesEntry.insert(0,"Auto")
solo_gui.trainingLabel = Tkinter.Label(solo_gui,text="Training")
solo_gui.trainingLabel.grid(row=nrow,column=2,columnspan=1,sticky="E")
solo_gui.trainingEntry = Tkinter.Entry(solo_gui,width=6)
solo_gui.trainingEntry.grid(row=nrow,column=3,columnspan=1,sticky="W")
solo_gui.trainingEntry.insert(0,"500")
solo_gui.factorLabel = Tkinter.Label(solo_gui,text="Nda factor")
solo_gui.factorLabel.grid(row=nrow,column=4,columnspan=1,sticky="E")
solo_gui.factorEntry = Tkinter.Entry(solo_gui,width=6)
solo_gui.factorEntry.grid(row=nrow,column=5,columnspan=1,sticky="W")
solo_gui.factorEntry.insert(0,"5")
# second row
nrow = nrow + 1
solo_gui.learningrateLabel = Tkinter.Label(solo_gui,text="Learning")
solo_gui.learningrateLabel.grid(row=nrow,column=2,columnspan=1,sticky="E")
solo_gui.learningrateEntry = Tkinter.Entry(solo_gui,width=6)
solo_gui.learningrateEntry.grid(row=nrow,column=3,columnspan=1,sticky="W")
solo_gui.learningrateEntry.insert(0,"0.001")
solo_gui.iterationsLabel = Tkinter.Label(solo_gui,text="Iterations")
solo_gui.iterationsLabel.grid(row=nrow,column=4,columnspan=1,sticky="E")
solo_gui.iterationsEntry = Tkinter.Entry(solo_gui,width=6)
solo_gui.iterationsEntry.grid(row=nrow,column=5,columnspan=1,sticky="W")
solo_gui.iterationsEntry.insert(0,"500")
# third row
nrow = nrow + 1
solo_gui.filestartLabel = Tkinter.Label(solo_gui,text="File start date")
solo_gui.filestartLabel.grid(row=nrow,column=0,columnspan=3)
solo_gui.fileendLabel = Tkinter.Label(solo_gui,text="File end date")
solo_gui.fileendLabel.grid(row=nrow,column=3,columnspan=3)
# fourth row
nrow = nrow + 1
solo_gui.filestartValue = Tkinter.Label(solo_gui,text=str(ldt[0]))
solo_gui.filestartValue.grid(row=nrow,column=0,columnspan=3)
solo_gui.fileendValue = Tkinter.Label(solo_gui,text=str(ldt[-1]))
solo_gui.fileendValue.grid(row=nrow,column=3,columnspan=3)
# fifth row
nrow = nrow + 1
solo_gui.startLabel = Tkinter.Label(solo_gui, text="Start date (YYYY-MM-DD)")
solo_gui.startLabel.grid(row=nrow,column=0,columnspan=3)
solo_gui.startEntry = Tkinter.Entry(solo_gui)
solo_gui.startEntry.grid(row=nrow,column=3,columnspan=3)
# sixth row
nrow = nrow + 1
solo_gui.endLabel = Tkinter.Label(solo_gui, text="End date (YYYY-MM-DD)")
solo_gui.endLabel.grid(row=nrow,column=0,columnspan=3)
solo_gui.endEntry = Tkinter.Entry(solo_gui)
solo_gui.endEntry.grid(row=nrow,column=3,columnspan=3)
# seventh row
nrow = nrow + 1
solo_gui.peropt = Tkinter.IntVar()
solo_gui.peropt.set(2)
solo_gui.manualperiod = Tkinter.Radiobutton(solo_gui,text="Manual",variable=solo_gui.peropt,value=1)
solo_gui.manualperiod.grid(row=nrow,column=0,columnspan=1,sticky="W")
#solo_gui.manualperiod = Tkinter.Radiobutton(solo_gui,text="Auto",variable=solo_gui.peropt,value=4)
#solo_gui.manualperiod.grid(row=nrow,column=1,columnspan=1,sticky="W")
solo_gui.minptsLabel = Tkinter.Label(solo_gui,text="Min. pts (%)")
solo_gui.minptsLabel.grid(row=nrow,column=3,columnspan=1,sticky="E")
solo_gui.minptsEntry = Tkinter.Entry(solo_gui,width=5)
solo_gui.minptsEntry.grid(row=nrow,column=4,columnspan=1,sticky="W")
solo_gui.minptsEntry.insert(0,"25")
# eigth row
nrow = nrow + 1
solo_gui.automonthly = Tkinter.Radiobutton(solo_gui,text="Monthly",variable=solo_gui.peropt,value=2)
solo_gui.automonthly.grid(row=nrow,column=0,columnspan=1,sticky="W")
solo_gui.daysLabel = Tkinter.Radiobutton(solo_gui,text="Days",variable=solo_gui.peropt,value=3)
solo_gui.daysLabel.grid(row=nrow,column=1,columnspan=1,sticky="W")
solo_gui.daysEntry = Tkinter.Entry(solo_gui,width=3)
solo_gui.daysEntry.grid(row=nrow,column=2,columnspan=1,sticky="W")
solo_gui.daysEntry.insert(0,"90")
solo_gui.autocompleteopt = Tkinter.IntVar()
solo_gui.autocompleteopt.set(1)
solo_gui.autocomplete = Tkinter.Checkbutton(solo_gui, text="Auto complete", variable=solo_gui.autocompleteopt)
solo_gui.autocomplete.grid(row=nrow,column=3,columnspan=3,sticky="w")
# ninth row
nrow = nrow + 1
solo_gui.pltopt = Tkinter.IntVar()
solo_gui.pltopt.set(1)
solo_gui.showplots = Tkinter.Checkbutton(solo_gui, text="Show plots", variable=solo_gui.pltopt)
solo_gui.showplots.grid(row=nrow,column=0,columnspan=3,sticky="w")
solo_gui.owopt = Tkinter.IntVar()
solo_gui.owopt.set(0)
solo_gui.overwrite = Tkinter.Checkbutton(solo_gui, text="Overwrite", variable=solo_gui.owopt)
solo_gui.overwrite.grid(row=nrow,column=3,columnspan=3,sticky="w")
# tenth row
nrow = nrow + 1
solo_gui.doneButton = Tkinter.Button (solo_gui, text="Done",command=lambda:gfSOLO_done(dsb,solo_gui,solo_info))
solo_gui.doneButton.grid(row=nrow,column=0,columnspan=2)
solo_gui.runButton = Tkinter.Button (solo_gui, text="Run",command=lambda:gfSOLO_run_gui(dsa,dsb,solo_gui,solo_info))
solo_gui.runButton.grid(row=nrow,column=2,columnspan=2)
solo_gui.quitButton = Tkinter.Button (solo_gui, text="Quit",command=lambda:gfSOLO_quit(dsb,solo_gui))
solo_gui.quitButton.grid(row=nrow,column=4,columnspan=2)
# eleventh row
nrow = nrow + 1
solo_gui.progress_row = nrow
solo_gui.progress = Tkinter.Label(solo_gui, text='Waiting for input ...')
solo_gui.progress.grid(row=nrow,column=0,columnspan=6,sticky="W")
solo_gui.wait_window(solo_gui)
def gfSOLO_autocomplete(dsa, dsb, solo_info):
if not solo_info["auto_complete"]: return
ldt = dsb.series["DateTime"]["Data"]
nRecs = len(ldt)
for output in dsb.solo.keys():
not_enough_points = False
series = dsb.solo[output]["label_tower"]
data_solo, _, _ = pfp_utils.GetSeriesasMA(dsb, output)
if numpy.ma.count(data_solo)==0:
continue
mask_solo = numpy.ma.getmaskarray(data_solo)
gapstartend = pfp_utils.contiguous_regions(mask_solo)
data_obs, _, _ = pfp_utils.GetSeriesasMA(dsb, series)
for si_gap, ei_gap in gapstartend:
min_points = int((ei_gap-si_gap)*solo_info["min_percent"]/100)
num_good_points = numpy.ma.count(data_obs[si_gap:ei_gap])
while num_good_points < min_points:
si_gap = max([0, si_gap - solo_info["nperday"]])
ei_gap = min([nRecs-1, ei_gap + solo_info["nperday"]])
if si_gap == 0 and ei_gap == nRecs-1:
msg = " Unable to find enough good points in series "+series
logger.warning(msg)
not_enough_points = True
if not_enough_points:
break
min_points = int((ei_gap-si_gap)*solo_info["min_percent"]/100)
num_good_points = numpy.ma.count(data_obs[si_gap:ei_gap])
if not_enough_points:
break
si = max([0, si_gap])
ei = min([len(ldt)-1, ei_gap])
solo_info["startdate"] = ldt[si].strftime("%Y-%m-%d %H:%M")
solo_info["enddate"] = ldt[ei].strftime("%Y-%m-%d %H:%M")
gfSOLO_main(dsa, dsb, solo_info, output_list=[output])
gfSOLO_plotcoveragelines(dsb, solo_info)
def gfSOLO_done(ds,solo_gui,solo_info):
# plot the summary statistics if gap filling was done manually
if solo_gui.peropt.get()==1:
# write Excel spreadsheet with fit statistics
pfp_io.xl_write_SOLOStats(ds)
# plot the summary statistics
gfSOLO_plotsummary(ds,solo_info)
# destroy the SOLO GUI
solo_gui.destroy()
# remove the solo dictionary from the data structure
ds.returncodes["solo"] = "normal"
def gfSOLO_getserieslist(cf):
series_list = []
if "Drivers" in cf.keys():
for series in cf["Drivers"].keys():
if "GapFillUsingSOLO" in cf["Drivers"][series]:
series_list.append(series)
elif "Fluxes" in cf.keys():
for series in cf["Fluxes"].keys():
if "GapFillUsingSOLO" in cf["Fluxes"][series]:
series_list.append(series)
elif "Variables" in cf.keys():
for series in cf["Variables"].keys():
if "GapFillUsingSOLO" in cf["Variables"][series]:
series_list.append(series)
else:
series_list = []
msg = "No Variables, Drivers or Fluxes section found in control file"
logger.error(msg)
return series_list
def gfSOLO_initplot(**kwargs):
# set the margins, heights, widths etc
pd = {"margin_bottom":0.075, "margin_top":0.075, "margin_left":0.05, "margin_right":0.05,
"xy_height":0.20, "xy_width":0.20, "xyts_space":0.05, "ts_width":0.9}
# set the keyword arguments
for key, value in kwargs.iteritems():
pd[key] = value
# calculate bottom of the first time series and the height of the time series plots
pd["ts_bottom"] = pd["margin_bottom"]+pd["xy_height"]+pd["xyts_space"]
pd["ts_height"] = (1.0 - pd["margin_top"] - pd["ts_bottom"])/float(pd["nDrivers"]+1)
return pd
def gfSOLO_main(dsa,dsb,solo_info,output_list=[]):
'''
    This is the main routine for running SOLO, an artificial neural network for gap-filling fluxes.
'''
if len(output_list)==0: output_list = dsb.solo.keys()
startdate = solo_info["startdate"]
enddate = solo_info["enddate"]
logger.info(" Gap filling using SOLO: "+startdate+" to "+enddate)
# read the control file again, this allows the contents of the control file to
# be changed with the SOLO GUI still displayed
cfname = dsb.globalattributes["controlfile_name"]
cf = pfp_io.get_controlfilecontents(cfname,mode="quiet")
solo_info["plot_path"] = cf["Files"]["plot_path"]
# put the control file object in the solo_info dictionary
dsb.cf = cf.copy()
# get some useful things
site_name = dsa.globalattributes["site_name"]
# get the time step and a local pointer to the datetime series
ts = dsb.globalattributes["time_step"]
ldt = dsb.series["DateTime"]["Data"]
xldt = dsb.series["xlDateTime"]["Data"]
# get the start and end datetime indices
si = pfp_utils.GetDateIndex(ldt,startdate,ts=ts,default=0,match="exact")
ei = pfp_utils.GetDateIndex(ldt,enddate,ts=ts,default=len(ldt)-1,match="exact")
# check the start and end indices
if si >= ei:
msg = " GapFillUsingSOLO: end datetime index ("+str(ei)+") smaller that start ("+str(si)+")"
logger.warning(msg)
return
if si==0 and ei==-1:
msg = " GapFillUsingSOLO: no start and end datetime specified, using all data"
logger.warning(msg)
nRecs = int(dsb.globalattributes["nc_nrecs"])
else:
nRecs = ei - si + 1
# loop over the series to be gap filled using solo
#solo_info["min_points"] = int(nRecs*solo_info["min_percent"]/100)
solo_info["min_points"] = int((ei-si)*solo_info["min_percent"]/100)
# close any open plot windows
if len(plt.get_fignums())!=0:
for i in plt.get_fignums():
if i!=0: plt.close(i)
fig_num = 0
for output in output_list:
# get the target series label
series = dsb.solo[output]["label_tower"]
# clean up the target series if required
variable = pfp_utils.GetVariable(dsb, series)
pfp_ck.UpdateVariableAttributes_QC(cf, variable)
pfp_ck.ApplyQCChecks(variable)
pfp_utils.CreateVariable(dsb, variable)
# check to see if we are gap filling L5 or L4
if dsb.globalattributes["nc_level"].lower()=="l4":
for driver in dsb.solo[output]["drivers"]:
for mlist in dsb.merge.keys():
if driver in dsb.merge[mlist]:
srclist = dsb.merge[mlist][driver]["source"]
pfp_ts.do_mergeseries(dsb,driver,srclist,mode="quiet")
dsb.solo[output]["results"]["startdate"].append(xldt[si])
dsb.solo[output]["results"]["enddate"].append(xldt[ei])
d,f,a = pfp_utils.GetSeriesasMA(dsb,series,si=si,ei=ei)
if numpy.ma.count(d)<solo_info["min_points"]:
logger.warning("gfSOLO: Less than "+str(solo_info["min_points"])+" points available for series "+series+" ...")
dsb.solo[output]["results"]["No. points"].append(float(0))
results_list = dsb.solo[output]["results"].keys()
for item in ["startdate","enddate","No. points"]:
if item in results_list: results_list.remove(item)
for item in results_list:
dsb.solo[output]["results"][item].append(float(c.missing_value))
continue
drivers = dsb.solo[output]["drivers"]
if str(solo_info["nodes"]).lower()=="auto":
solo_info["nodes_target"] = len(drivers)+1
else:
solo_info["nodes_target"] = int(solo_info["nodes"])
#output = dsb.solo[series]["output"]
# set the number of nodes for the inf files
#nodesAuto = gfSOLO_setnodesEntry(solo_gui,drivers)
# overwrite the GUI settings if required
if "solo_settings" in dsb.solo[output]:
solo_info["nodes_target"] = dsb.solo[output]["solo_settings"]["nodes_target"]
solo_info["training"] = dsb.solo[output]["solo_settings"]["training"]
solo_info["factor"] = dsb.solo[output]["solo_settings"]["factor"]
solo_info["learningrate"] = dsb.solo[output]["solo_settings"]["learningrate"]
solo_info["iterations"] = dsb.solo[output]["solo_settings"]["iterations"]
# write the inf files for sofm, solo and seqsolo
gfSOLO_writeinffiles(solo_info)
# run SOFM
result = gfSOLO_runsofm(dsa,dsb,drivers,series,nRecs,si=si,ei=ei)
if result!=1: return
# run SOLO
result = gfSOLO_runsolo(dsa,dsb,drivers,series,nRecs,si=si,ei=ei)
if result!=1: return
# run seqsolo and put the solo_modelled data into the ds series
result = gfSOLO_runseqsolo(dsa,dsb,drivers,series,output,nRecs,si=si,ei=ei)
if result!=1: return
# plot the results
fig_num = fig_num + 1
title = site_name+' : Comparison of tower and SOLO data for '+series
pd = gfSOLO_initplot(site_name=site_name,label=series,fig_num=fig_num,title=title,
nDrivers=len(drivers))
gfSOLO_plot(pd,dsa,dsb,drivers,series,output,solo_info,si=si,ei=ei)
# reset the nodesEntry in the solo_gui
#if nodesAuto: gfSOLO_resetnodesEntry(solo_gui)
if 'GapFillUsingSOLO' not in dsb.globalattributes['Functions']:
dsb.globalattributes['Functions'] = dsb.globalattributes['Functions']+', GapFillUsingSOLO'
def gfSOLO_plot(pd,dsa,dsb,driverlist,targetlabel,outputlabel,solo_info,si=0,ei=-1):
""" Plot the results of the SOLO run. """
# get the time step
ts = int(dsb.globalattributes['time_step'])
# get a local copy of the datetime series
xdt = dsb.series["DateTime"]["Data"][si:ei+1]
Hdh,f,a = pfp_utils.GetSeriesasMA(dsb,'Hdh',si=si,ei=ei)
# get the observed and modelled values
obs,f,a = pfp_utils.GetSeriesasMA(dsb,targetlabel,si=si,ei=ei)
mod,f,a = pfp_utils.GetSeriesasMA(dsb,outputlabel,si=si,ei=ei)
# make the figure
if solo_info["show_plots"]:
plt.ion()
else:
plt.ioff()
fig = plt.figure(pd["fig_num"],figsize=(13,8))
fig.clf()
fig.canvas.set_window_title(targetlabel)
plt.figtext(0.5,0.95,pd["title"],ha='center',size=16)
# XY plot of the diurnal variation
rect1 = [0.10,pd["margin_bottom"],pd["xy_width"],pd["xy_height"]]
ax1 = plt.axes(rect1)
# get the diurnal stats of the observations
mask = numpy.ma.mask_or(obs.mask,mod.mask)
obs_mor = numpy.ma.array(obs,mask=mask)
Num1,Hr1,Av1,Sd1,Mx1,Mn1 = gf_getdiurnalstats(Hdh,obs_mor,ts)
ax1.plot(Hr1,Av1,'b-',label="Obs")
# get the diurnal stats of all SOLO predictions
Num2,Hr2,Av2,Sd2,Mx2,Mn2 = gf_getdiurnalstats(Hdh,mod,ts)
ax1.plot(Hr2,Av2,'r-',label="SOLO(all)")
# get the diurnal stats of SOLO predictions when the obs are present
mod_mor = numpy.ma.array(mod,mask=mask)
if numpy.ma.count_masked(obs)!=0:
index = numpy.where(numpy.ma.getmaskarray(obs)==False)[0]
#index = numpy.ma.where(numpy.ma.getmaskarray(obs)==False)[0]
# get the diurnal stats of SOLO predictions when observations are present
Num3,Hr3,Av3,Sd3,Mx3,Mn3=gf_getdiurnalstats(Hdh[index],mod_mor[index],ts)
ax1.plot(Hr3,Av3,'g-',label="SOLO(obs)")
plt.xlim(0,24)
plt.xticks([0,6,12,18,24])
ax1.set_ylabel(targetlabel)
ax1.set_xlabel('Hour')
ax1.legend(loc='upper right',frameon=False,prop={'size':8})
# XY plot of the 30 minute data
rect2 = [0.40,pd["margin_bottom"],pd["xy_width"],pd["xy_height"]]
ax2 = plt.axes(rect2)
ax2.plot(mod,obs,'b.')
ax2.set_ylabel(targetlabel+'_obs')
ax2.set_xlabel(targetlabel+'_SOLO')
# plot the best fit line
coefs = numpy.ma.polyfit(numpy.ma.copy(mod),numpy.ma.copy(obs),1)
xfit = numpy.ma.array([numpy.ma.minimum(mod),numpy.ma.maximum(mod)])
yfit = numpy.polyval(coefs,xfit)
r = numpy.ma.corrcoef(mod,obs)
ax2.plot(xfit,yfit,'r--',linewidth=3)
eqnstr = 'y = %.3fx + %.3f, r = %.3f'%(coefs[0],coefs[1],r[0][1])
ax2.text(0.5,0.875,eqnstr,fontsize=8,horizontalalignment='center',transform=ax2.transAxes)
# write the fit statistics to the plot
numpoints = trap_masked_constant(numpy.ma.count(obs))
numfilled = trap_masked_constant(numpy.ma.count(mod)-numpy.ma.count(obs))
diff = mod - obs
bias = trap_masked_constant(numpy.ma.average(diff))
fractional_bias = trap_masked_constant(bias/(0.5*(numpy.ma.average(obs+mod))))
dsb.solo[outputlabel]["results"]["Bias"].append(bias)
dsb.solo[outputlabel]["results"]["Frac Bias"].append(fractional_bias)
rmse = numpy.ma.sqrt(numpy.ma.mean((obs-mod)*(obs-mod)))
mean_mod = numpy.ma.mean(mod)
mean_obs = numpy.ma.mean(obs)
data_range = numpy.ma.maximum(obs)-numpy.ma.minimum(obs)
nmse = rmse/data_range
plt.figtext(0.65,0.225,'No. points')
plt.figtext(0.75,0.225,str(numpoints))
dsb.solo[outputlabel]["results"]["No. points"].append(numpoints)
plt.figtext(0.65,0.200,'No. filled')
plt.figtext(0.75,0.200,str(numfilled))
plt.figtext(0.65,0.175,'Nodes')
plt.figtext(0.75,0.175,str(solo_info["nodes_target"]))
plt.figtext(0.65,0.150,'Training')
plt.figtext(0.75,0.150,str(solo_info["training"]))
plt.figtext(0.65,0.125,'Nda factor')
plt.figtext(0.75,0.125,str(solo_info["factor"]))
plt.figtext(0.65,0.100,'Learning rate')
plt.figtext(0.75,0.100,str(solo_info["learningrate"]))
plt.figtext(0.65,0.075,'Iterations')
plt.figtext(0.75,0.075,str(solo_info["iterations"]))
plt.figtext(0.815,0.225,'Slope')
plt.figtext(0.915,0.225,str(pfp_utils.round2sig(coefs[0],sig=4)))
dsb.solo[outputlabel]["results"]["m_ols"].append(trap_masked_constant(coefs[0]))
plt.figtext(0.815,0.200,'Offset')
plt.figtext(0.915,0.200,str(pfp_utils.round2sig(coefs[1],sig=4)))
dsb.solo[outputlabel]["results"]["b_ols"].append(trap_masked_constant(coefs[1]))
plt.figtext(0.815,0.175,'r')
plt.figtext(0.915,0.175,str(pfp_utils.round2sig(r[0][1],sig=4)))
dsb.solo[outputlabel]["results"]["r"].append(trap_masked_constant(r[0][1]))
plt.figtext(0.815,0.150,'RMSE')
plt.figtext(0.915,0.150,str(pfp_utils.round2sig(rmse,sig=4)))
dsb.solo[outputlabel]["results"]["RMSE"].append(trap_masked_constant(rmse))
dsb.solo[outputlabel]["results"]["NMSE"].append(trap_masked_constant(nmse))
var_obs = numpy.ma.var(obs)
plt.figtext(0.815,0.125,'Var (obs)')
plt.figtext(0.915,0.125,'%.4g'%(var_obs))
dsb.solo[outputlabel]["results"]["Var (obs)"].append(trap_masked_constant(var_obs))
var_mod = numpy.ma.var(mod)
plt.figtext(0.815,0.100,'Var (SOLO)')
plt.figtext(0.915,0.100,'%.4g'%(var_mod))
dsb.solo[outputlabel]["results"]["Var (SOLO)"].append(trap_masked_constant(var_mod))
dsb.solo[outputlabel]["results"]["Var ratio"].append(trap_masked_constant(var_obs/var_mod))
dsb.solo[outputlabel]["results"]["Avg (obs)"].append(trap_masked_constant(numpy.ma.average(obs)))
dsb.solo[outputlabel]["results"]["Avg (SOLO)"].append(trap_masked_constant( | numpy.ma.average(mod) | numpy.ma.average |
""" Helper methods for loading and parsing KITTI data.
Author: <NAME>, <NAME>
Date: September 2017/2018
https://github.com/kuixu/kitti_object_vis
"""
import numpy as np
cbox = np.array([[0,70],[-40,40],[-2.5,1]])
class Calibration(object):
''' Calibration matrices and utils
3d XYZ in <label>.txt are in rect camera coord.
2d box xy are in image2 coord
Points in <lidar>.bin are in Velodyne coord.
y_image2 = P^2_rect * x_rect
y_image2 = P^2_rect * R0_rect * Tr_velo_to_cam * x_velo
x_ref = Tr_velo_to_cam * x_velo
x_rect = R0_rect * x_ref
P^2_rect = [f^2_u, 0, c^2_u, -f^2_u b^2_x;
0, f^2_v, c^2_v, -f^2_v b^2_y;
0, 0, 1, 0]
= K * [1|t]
image2 coord:
----> x-axis (u)
|
|
v y-axis (v)
velodyne coord:
front x, left y, up z
rect/ref camera coord:
right x, down y, front z
Ref (KITTI paper): http://www.cvlibs.net/publications/Geiger2013IJRR.pdf
TODO(rqi): do matrix multiplication only once for each projection.
'''
def __init__(self, calib_filepath, from_video=False):
calibs = self.read_calib_file(calib_filepath)
# Projection matrix from rect camera coord to image2 coord
self.P = calibs['P2']
self.P = np.reshape(self.P, [3,4])
# Rigid transform from Velodyne coord to reference camera coord
self.V2C = calibs['Tr_velo_to_cam']
self.V2C = np.reshape(self.V2C, [3,4])
self.C2V = inverse_rigid_trans(self.V2C)
# Rotation from reference camera coord to rect camera coord
self.R0 = calibs['R0_rect']
self.R0 = np.reshape(self.R0,[3,3])
# Camera intrinsics and extrinsics
self.c_u = self.P[0,2]
self.c_v = self.P[1,2]
self.f_u = self.P[0,0]
self.f_v = self.P[1,1]
self.b_x = self.P[0,3]/(-self.f_u) # relative
self.b_y = self.P[1,3]/(-self.f_v)
def read_calib_file(self, filepath):
''' Read in a calibration file and parse into a dictionary.
Ref: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
'''
data = {}
with open(filepath, 'r') as f:
for line in f.readlines():
line = line.rstrip()
if len(line)==0: continue
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def cart2hom(self, pts_3d):
''' Input: nx3 points in Cartesian
            Output: nx4 points in Homogeneous by appending 1
'''
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n,1))))
return pts_3d_hom
# ===========================
# ------- 3d to 3d ----------
# ===========================
def project_velo_to_ref(self, pts_3d_velo):
pts_3d_velo2 = self.cart2hom(pts_3d_velo) # nx4
result = np.dot(pts_3d_velo2, np.transpose(self.V2C))
return result
def project_ref_to_velo(self, pts_3d_ref):
pts_3d_ref = self.cart2hom(pts_3d_ref) # nx4
return np.dot(pts_3d_ref, np.transpose(self.C2V))
def project_rect_to_ref(self, pts_3d_rect):
''' Input and Output are nx3 points '''
return np.transpose(np.dot(np.linalg.inv(self.R0), np.transpose(pts_3d_rect)))
def project_ref_to_rect(self, pts_3d_ref):
''' Input and Output are nx3 points '''
return np.transpose(np.dot(self.R0, np.transpose(pts_3d_ref)))
def project_rect_to_velo(self, pts_3d_rect):
''' Input: nx3 points in rect camera coord.
Output: nx3 points in velodyne coord.
'''
pts_3d_ref = self.project_rect_to_ref(pts_3d_rect)
return self.project_ref_to_velo(pts_3d_ref)
def project_velo_to_rect(self, pts_3d_velo):
pts_3d_ref = self.project_velo_to_ref(pts_3d_velo)
result = self.project_ref_to_rect(pts_3d_ref)
# result = np.zeros_like(pts_3d_velo)
return result
# ===========================
# ------- 3d to 2d ----------
# ===========================
def project_rect_to_image(self, pts_3d_rect):
''' Input: nx3 points in rect camera coord.
Output: nx2 points in image2 coord.
'''
pts_3d_rect = self.cart2hom(pts_3d_rect)
pts_2d = np.dot(pts_3d_rect, np.transpose(self.P)) # nx3
pts_2d[:,0] /= pts_2d[:,2]
pts_2d[:,1] /= pts_2d[:,2]
return pts_2d[:,0:2]
def project_velo_to_image(self, pts_3d_velo):
''' Input: nx3 points in velodyne coord.
Output: nx2 points in image2 coord.
'''
pts_3d_rect = self.project_velo_to_rect(pts_3d_velo)
# pts_3d_rect = np.zeros_like(pts_3d_velo)
result = self.project_rect_to_image(pts_3d_rect)
# result = np.zeros((pts_3d_rect.shape[0], 3))
return result
def project_8p_to_4p(self, pts_2d):
x0 = np.min(pts_2d[:,0])
x1 = np.max(pts_2d[:,0])
y0 = np.min(pts_2d[:,1])
y1 = np.max(pts_2d[:,1])
x0 = max(0,x0)
#x1 = min(x1, proj.image_width)
y0 = max(0,y0)
#y1 = min(y1, proj.image_height)
return np.array([x0, y0, x1, y1])
def project_velo_to_4p(self, pts_3d_velo):
''' Input: nx3 points in velodyne coord.
Output: 4 points in image2 coord.
'''
pts_2d_velo = self.project_velo_to_image(pts_3d_velo)
return self.project_8p_to_4p(pts_2d_velo)
# ===========================
# ------- 2d to 3d ----------
# ===========================
def project_image_to_rect(self, uv_depth):
''' Input: nx3 first two channels are uv, 3rd channel
is depth in rect camera coord.
Output: nx3 points in rect camera coord.
'''
n = uv_depth.shape[0]
x = ((uv_depth[:,0]-self.c_u)*uv_depth[:,2])/self.f_u + self.b_x
y = ((uv_depth[:,1]-self.c_v)*uv_depth[:,2])/self.f_v + self.b_y
pts_3d_rect = np.zeros((n,3))
pts_3d_rect[:,0] = x
pts_3d_rect[:,1] = y
pts_3d_rect[:,2] = uv_depth[:,2]
return pts_3d_rect
def project_image_to_velo(self, uv_depth):
pts_3d_rect = self.project_image_to_rect(uv_depth)
return self.project_rect_to_velo(pts_3d_rect)
def project_depth_to_velo(self, depth, constraint_box=True):
depth_pt3d = get_depth_pt3d(depth)
depth_UVDepth = np.zeros_like(depth_pt3d)
depth_UVDepth[:,0] = depth_pt3d[:,1]
depth_UVDepth[:,1] = depth_pt3d[:,0]
depth_UVDepth[:,2] = depth_pt3d[:,2]
#print("depth_pt3d:",depth_UVDepth.shape)
depth_pc_velo = self.project_image_to_velo(depth_UVDepth)
#print("dep_pc_velo:",depth_pc_velo.shape)
if constraint_box:
depth_box_fov_inds = (depth_pc_velo[:,0]< cbox[0][1] ) & \
(depth_pc_velo[:,0]>= cbox[0][0] ) & \
(depth_pc_velo[:,1]< cbox[1][1]) & \
(depth_pc_velo[:,1]>= cbox[1][0]) & \
(depth_pc_velo[:,2]< cbox[2][1]) & \
(depth_pc_velo[:,2]>= cbox[2][0])
depth_pc_velo=depth_pc_velo[depth_box_fov_inds]
return depth_pc_velo
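# Illustrative usage sketch (not part of the class above; the calibration
# file path is a placeholder): project a few velodyne points into image2
# following the chain described in the Calibration docstring
# (x_velo -> x_ref -> x_rect -> y_image2).
def _example_project_velo_points(calib_filepath='calib/000000.txt'):
    calib = Calibration(calib_filepath)
    pts_velo = np.array([[10.0, 1.0, -0.5],
                         [20.0, -2.0, 0.3]])         # nx3, velodyne coord
    pts_rect = calib.project_velo_to_rect(pts_velo)  # nx3, rect camera coord
    pts_img = calib.project_rect_to_image(pts_rect)  # nx2, image2 pixel coord
    return pts_rect, pts_img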
def get_depth_pt3d(depth):
pt3d=[]
for i in range(depth.shape[0]):
for j in range(depth.shape[1]):
pt3d.append([i, j, depth[i, j]])
return np.array(pt3d)
def rotx(t):
''' 3D Rotation about the x-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[1, 0, 0],
[0, c, -s],
[0, s, c]])
def roty(t):
''' Rotation about the y-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def rotz(t):
''' Rotation about the z-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def transform_from_rot_trans(R, t):
    ''' Transformation matrix from rotation matrix and translation vector. '''
R = R.reshape(3, 3)
t = t.reshape(3, 1)
return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
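# Small illustrative sketch: compose a 4x4 rigid transform from a rotation
# about the y-axis and a translation using the helpers above, then apply it
# to a homogeneous point.
def _example_rigid_transform(angle=np.pi/6, t=(1.0, 0.0, 2.0)):
    R = roty(angle)                               # 3x3 rotation
    T = transform_from_rot_trans(R, np.array(t))  # 4x4 homogeneous transform
    p = np.array([0.5, 0.0, 1.0, 1.0])            # homogeneous point
    return np.dot(T, p)                           # rotated, then translated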
def inverse_rigid_trans(Tr):
''' Inverse a rigid body transform matrix (3x4 as [R|t])
[R'|-R't; 0|1]
'''
    inv_Tr = np.zeros_like(Tr)  # 3x4
    inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])
    inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
    return inv_Tr
# -*- coding: utf-8 -*-
"""
RED Log Encodings
=================
Defines the *RED* log encodings:
- :func:`colour.models.log_encoding_REDLog`
- :func:`colour.models.log_decoding_REDLog`
- :func:`colour.models.log_encoding_REDLogFilm`
- :func:`colour.models.log_decoding_REDLogFilm`
- :func:`colour.models.log_encoding_Log3G10`
- :func:`colour.models.log_decoding_Log3G10`
- :func:`colour.models.log_encoding_Log3G12`
- :func:`colour.models.log_decoding_Log3G12`
See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_
References
----------
- :cite:`Nattress2016a` : <NAME>. (2016). Private Discussion with
Shaw, N.
- :cite:`SonyImageworks2012a` : Sony Imageworks. (2012). make.py. Retrieved
November 27, 2014, from https://github.com/imageworks/OpenColorIO-Configs/\
blob/master/nuke-default/make.py
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.models.rgb.transfer_functions import (log_encoding_Cineon,
log_decoding_Cineon)
from colour.utilities import from_range_1, to_domain_1
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'log_encoding_REDLog', 'log_decoding_REDLog', 'log_encoding_REDLogFilm',
'log_decoding_REDLogFilm', 'log_encoding_Log3G10', 'log_decoding_Log3G10',
'log_encoding_Log3G12', 'log_decoding_Log3G12'
]
def log_encoding_REDLog(x, black_offset=10 ** ((0 - 1023) / 511)):
"""
Defines the *REDLog* log encoding curve / opto-electronic transfer
function.
Parameters
----------
x : numeric or array_like
Linear data :math:`x`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Non-linear data :math:`y`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`SonyImageworks2012a`
Examples
--------
>>> log_encoding_REDLog(0.18) # doctest: +ELLIPSIS
0.6376218...
"""
x = to_domain_1(x)
y = (1023 + 511 * np.log10(x * (1 - black_offset) + black_offset)) / 1023
return from_range_1(y)
def log_decoding_REDLog(y, black_offset=10 ** ((0 - 1023) / 511)):
"""
Defines the *REDLog* log decoding curve / electro-optical transfer
function.
Parameters
----------
y : numeric or array_like
Non-linear data :math:`y`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`SonyImageworks2012a`
Examples
--------
>>> log_decoding_REDLog(0.637621845988175) # doctest: +ELLIPSIS
0.1...
"""
y = to_domain_1(y)
x = ((10 ** ((1023 * y - 1023) / 511)) - black_offset) / (1 - black_offset)
return from_range_1(x)
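def _example_redlog_round_trip(x=0.18):
    """
    Illustrative round trip (sketch only): encoding then decoding recovers
    the linear input to within floating point error, mirroring the doctest
    examples above (0.18 -> ~0.6376218 -> ~0.18).
    """
    y = log_encoding_REDLog(x)
    return log_decoding_REDLog(y)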
def log_encoding_REDLogFilm(x, black_offset=10 ** ((95 - 685) / 300)):
"""
Defines the *REDLogFilm* log encoding curve / opto-electronic transfer
function.
Parameters
----------
x : numeric or array_like
Linear data :math:`x`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Non-linear data :math:`y`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`SonyImageworks2012a`
Examples
--------
>>> log_encoding_REDLogFilm(0.18) # doctest: +ELLIPSIS
0.4573196...
"""
return log_encoding_Cineon(x, black_offset)
def log_decoding_REDLogFilm(y, black_offset=10 ** ((95 - 685) / 300)):
"""
Defines the *REDLogFilm* log decoding curve / electro-optical transfer
function.
Parameters
----------
y : numeric or array_like
Non-linear data :math:`y`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`SonyImageworks2012a`
Examples
--------
>>> log_decoding_REDLogFilm(0.457319613085418) # doctest: +ELLIPSIS
0.1799999...
"""
return log_decoding_Cineon(y, black_offset)
def log_encoding_Log3G10(x, legacy_curve=False):
"""
Defines the *Log3G10* log encoding curve / opto-electronic transfer
function.
Parameters
----------
x : numeric or array_like
Linear data :math:`x`.
legacy_curve : bool, optional
Whether to use the v1 *Log3G10* log encoding curve. Default is *False*.
Returns
-------
numeric or ndarray
Non-linear data :math:`y`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
- The v1 *Log3G10* log encoding curve is the one used in *REDCINE-X beta
42*. *Resolve 12.5.2* also uses the v1 curve. *RED* is planning to use
v2 *Log3G10* log encoding curve in the release version of the
*RED SDK*.
Use the `legacy_curve=True` argument to switch to the v1 curve for
compatibility with the current (as of September 21, 2016) *RED SDK*.
- The intent of the v1 *Log3G10* log encoding curve is that zero maps to
zero, 0.18 maps to 1/3, and 10 stops above 0.18 maps to 1.0.
The name indicates this in a similar way to the naming conventions of
*Sony HyperGamma* curves.
The constants used in the functions do not in fact quite hit these
values, but rather than use corrected constants, the functions here
use the official *RED* values, in order to match the output of the
*RED SDK*.
For those interested, solving for constants which exactly hit 1/3
and 1.0 yields the following values::
B = 25 * (np.sqrt(4093.0) - 3) / 9
A = 1 / np.log10(B * 184.32 + 1)
where the function takes the form::
Log3G10(x) = A * np.log10(B * x + 1)
Similarly for *Log3G12*, the values which hit exactly 1/3 and 1.0
are::
B = 25 * (np.sqrt(16381.0) - 3) / 9
A = 1 / np.log10(B * 737.28 + 1)
References
----------
:cite:`Nattress2016a`
Examples
--------
>>> log_encoding_Log3G10(0.18, legacy_curve=True) # doctest: +ELLIPSIS
0.3333336...
>>> log_encoding_Log3G10(0.0) # doctest: +ELLIPSIS
0.0915514...
"""
x = to_domain_1(x)
if legacy_curve:
y = np.sign(x) * 0.222497 * np.log10((np.abs(x) * 169.379333) + 1)
else:
y = (np.sign(x + 0.01) * 0.224282 *
np.log10((np.abs(x + 0.01) * 155.975327) + 1))
return from_range_1(y)
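def _log3g_exact_constants():
    """
    Sketch of the "exact-hit" constants described in the notes of
    :func:`log_encoding_Log3G10`: values of :math:`A` and :math:`B` for which
    0.18 maps exactly to 1/3 and 10 (respectively 12) stops above 0.18 map
    exactly to 1.0. Added for illustration only; the encoding functions keep
    the official *RED* constants.
    """
    B_10 = 25 * (np.sqrt(4093.0) - 3) / 9
    A_10 = 1 / np.log10(B_10 * 184.32 + 1)   # 184.32 = 0.18 * 2 ** 10
    B_12 = 25 * (np.sqrt(16381.0) - 3) / 9
    A_12 = 1 / np.log10(B_12 * 737.28 + 1)   # 737.28 = 0.18 * 2 ** 12
    return (A_10, B_10), (A_12, B_12)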
def log_decoding_Log3G10(y, legacy_curve=False):
"""
Defines the *Log3G10* log decoding curve / electro-optical transfer
function.
Parameters
----------
y : numeric or array_like
Non-linear data :math:`y`.
legacy_curve : bool, optional
Whether to use the v1 *Log3G10* log encoding curve. Default is *False*.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Nattress2016a`
Examples
--------
>>> log_decoding_Log3G10(1.0 / 3, legacy_curve=True) # doctest: +ELLIPSIS
0.1799994...
>>> log_decoding_Log3G10(1.0) # doctest: +ELLIPSIS
184.3223476...
"""
y = to_domain_1(y)
if legacy_curve:
x = (np.sign(y) * (10.0 ** (np.abs(y) / 0.222497) - 1) / 169.379333)
else:
x = (np.sign(y) * (10.0 **
(np.abs(y) / 0.224282) - 1) / 155.975327) - 0.01
return from_range_1(x)
def log_encoding_Log3G12(x):
"""
Defines the *Log3G12* log encoding curve / opto-electronic transfer
function.
Parameters
----------
x : numeric or array_like
Linear data :math:`x`.
Returns
-------
numeric or ndarray
Non-linear data :math:`y`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Nattress2016a`
Examples
--------
>>> log_encoding_Log3G12(0.18) # doctest: +ELLIPSIS
0.3333326...
"""
x = to_domain_1(x)
    y = np.sign(x) * 0.184904 * np.log10((np.abs(x) * 347.189667) + 1)

    return from_range_1(y)
"""
Example implementation
- TODO >> I should eventually delete this file altogether
"""
import numpy as np
import matplotlib.pylab as plt
# Global Variables
__input_file__ = "dataset.txt"
__context_size__ = 100
__arm_size__ = 10
# Function to calculate the Cumulative Take Rate
def calc_CTR(reward, ctr_num, ctr_den):
# Keeps a count of CTR and updates the numerator and denominator on every correct call
ctr_num = ctr_num + reward
ctr_den = ctr_den + 1
ctr = ctr_num/ctr_den
return ctr, ctr_num, ctr_den
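# Tiny usage sketch (illustrative): feed a stream of rewards through calc_CTR
# to keep a running cumulative take rate.
def _example_calc_ctr(rewards=(1, 0, 1, 1)):
    ctr, num, den = 0.0, 0, 0
    for r in rewards:
        ctr, num, den = calc_CTR(r, num, den)
    return ctr  # 0.75 for the default reward stream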
# linUCB Algorithm
def linUCB(data, __alpha__):
# Dictionary to store CTR at every time step
CTR = {}
# Declare the coefficient, upper confidence bound dictionary and the other dictionaries
# used for plotting the various graphs
coeff = {}
ucb = {}
ucb_mean = {}
arm_pred = {}
correct_pred = {}
# Declare dictionaries A and b
A = {}
b = {}
for arm in range(1,__arm_size__ + 1):
# Initialize matrix A and b for each arm, with dimension as 100 which is the size of context
A[arm] = np.identity(__context_size__) # dimension 100 * 100
b[arm] = np.atleast_2d(np.zeros(__context_size__)).T # dimension 100 * 1
# Initialize UCBs as 0 for all arms
ucb[arm] = 0
# Initialize arm prediction for each arm - to plot it later
arm_pred[arm] = 0
correct_pred[arm] = 0
ucb_mean[arm] = 0
# Initiate time, predicted arm and numerator and denominators of CTR
time = 1
ctr_num = 0
ctr_den = 0
    # Iterate over all 10,000 data points
for line in data:
# Fetch individual values to perform operations on them
values = line.split()
# Convert all strings in the list to integer
values = list(map(int, values))
# Get the Arm - which is the action performed
curr_arm = values.pop(0)
# Get reward for the current action
reward = values.pop(0)
# Create the context array
context = np.asarray(values)
# Loop to update coefficient and calculate pay off for each arm
for arm in range(1,__arm_size__ + 1):
# Calculate the coefficent, standard deviation and UCB for the arm
coeff[arm] = np.dot(np.linalg.inv(A[arm]), b[arm])
standard_deviation = np.sqrt(np.dot(context.T, np.dot(np.linalg.inv(A[arm]), context)))
# Method 1 - Prediction count dependent alpha
if(__alpha__ == -1):
denom = 0.001/((correct_pred[arm]+1)/10)
ucb[arm] = np.asscalar(np.dot(coeff[arm].T, context) + np.dot(denom, standard_deviation))
elif(__alpha__ == 0):
# Method 2 - Time Dependent Alpha
                # NOTE: the alpha schedule below is an assumed completion (1/sqrt(t))
                ucb[arm] = np.asscalar(np.dot(coeff[arm].T, context) +
                                       (1.0 / np.sqrt(time)) * standard_deviation)
import numpy as np
from numpy import pi, unravel_index
from PyQt5 import QtCore
import pyqtgraph as pg
from scipy.fftpack import fft, fftshift
from acconeer_utils.clients.reg.client import RegClient, RegSPIClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
from acconeer_utils.pg_process import PGProcess, PGProccessDiedException
import logging
log = logging.getLogger("acconeer_utils.examples.obstacle_detection")
MAX_SPEED = 8.00 # Max speed to be resolved with FFT in cm/s
WAVELENGTH = 0.49 # Wavelength of radar in cm
def main():
args = example_utils.ExampleArgumentParser(num_sens=1).parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
elif args.spi:
client = RegSPIClient()
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
sensor_config = get_sensor_config()
processing_config = get_processing_config()
sensor_config.sensor = args.sensors
client.setup_session(sensor_config)
pg_updater = PGUpdater(sensor_config, processing_config)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_streaming()
interrupt_handler = example_utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
processor = ObstacleDetectionProcessor(sensor_config, processing_config)
while not interrupt_handler.got_signal:
info, sweep = client.get_next()
plot_data = processor.process(sweep)
if plot_data is not None:
try:
pg_process.put_data(plot_data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_sensor_config():
config = configs.IQServiceConfig()
config.range_interval = [0.1, 0.5]
    config.sweep_rate = int(np.ceil(MAX_SPEED * 4 / WAVELENGTH))
    return config
import hashlib
import itertools
import json
import numpy as np
from sklearn.manifold import TSNE
class Orientation(object):
def __init__(self, direction):
if direction == 'both':
self.direction = direction
elif direction == True:
self.direction = 'left'
elif direction == False:
self.direction = 'right'
def __eq__(self, value):
if self.direction == 'both' or value.direction == 'both':
return True
if self.direction == value.direction:
return True
return False
def __str__(self):
if self.direction == 'both':
return self.direction
elif self.direction == 'left':
return 'True'
elif self.direction == 'right':
return 'False'
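# Illustrative sketch: 'both' compares equal to either concrete direction,
# while 'left' (True) and 'right' (False) only match themselves.
def _example_orientation_equality():
    return (Orientation('both') == Orientation(True),   # True
            Orientation(True) == Orientation(False))    # False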
def get_hash(d):
"""
Get the hash of a dictionary
TODO: Check if it is better to change from MD5 to SHA1
Parameters
----------
d: dict
The dictionary to hash
Returns
-------
String
The md5 hash of the dictionary
"""
return hashlib.md5(json.dumps(d, sort_keys=True, default=str).encode('utf-8')).hexdigest()
def normalize(array):
"""
Normalize a 1d numpy array between [0,1]
Parameters
----------
array: ndarray
the array to normalize
Returns
-------
ndarray
the normalized array
"""
ptp = np.ptp(array)
if ptp != 0:
return (array - np.min(array)) / np.ptp(array)
else:
return np.ones_like(array)
def matching_items(d1, d2):
matching_keys = []
common_keys = d1.keys() & d2.keys()
for k in common_keys:
if d1[k] == d2[k]:
matching_keys.append(k)
return matching_keys
def merge_dictionaries_with_disagreements(d1, d2):
merge = {**d1, **d2}
common_keys = d1.keys() & d2.keys()
for k in common_keys:
if d1[k] != d2[k]:
merge.pop(k)
return merge
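def _example_merge_with_disagreements():
    """
    Illustrative sketch: keys on which the two dictionaries disagree are
    dropped from the merge; keys they agree on (or that appear in only one
    of them) are kept.
    """
    d1 = {'a': 1, 'b': 2}
    d2 = {'b': 3, 'c': 4}
    return merge_dictionaries_with_disagreements(d1, d2)  # {'a': 1, 'c': 4}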
def get_positions_from_labels(ys):
positions = np.zeros([len(ys), 2])
classes = np.unique(ys)
num_pts = len(classes)
indices = np.arange(0, num_pts, dtype=float) + 0.5
r = np.sqrt(indices / num_pts)
theta = np.pi * (1 + 5 ** 0.5) * indices
    means = np.transpose([r * np.cos(theta), r * np.sin(theta)])
import numpy as np
from matplotlib import animation
from matplotlib.widgets import Button, Slider
import matplotlib.pyplot as plt
from Plotter import dataset, figret
import math,random
class constraints:
def __init__(self,f,val,max,min):
self.f = f
self.max = max
self.min=min
self.val = val
class dataform:
def __init__(self,data,min,max,lrate=None,name=None,cons=None):
self.data=data
self.min=min
self.max=max
self.lrate=lrate
self.name=name
self.constraints=cons
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx], idx
def find_nearest_point(arrayy,arrayx,x,y):
arrayy = np.asarray(arrayy)
arrayx = np.asarray(arrayx)
newx=None
newy=None
for i in range(len(arrayy)):
if(newx==None):
newx=arrayx[i]
newy=arrayy[i]
else:
r=np.abs(((newx**2) + (newy**2)) - (x**2 + y**2))
rp = np.abs((arrayx[i]**2) + (arrayy[i]**2) - (x**2 + y**2))
if( rp<r):
newx = arrayx[i]
newy = arrayy[i]
return newx,newy
def getGradient(y: [], x: [], i: int = None, minchange: float = 1e-4, delfactor: float = 0.5):
a = None
b = None
delf = None
if (i == None):
a = (y[-1] - y[-2])
b = (x[-1] - x[-2])
else:
a = (y[i] - y[i - 1])
b = (x[i] - x[i - 1])
#todo add second order functionality
# if it is not greater than minchange in x it be a factor of the y
if (np.abs(b) >= minchange and np.abs(a)>0):
grad = (b) / (a)
else:
rand = np.random.rand()
grad = x[-1]*((rand-0.5))*5e-1
# if (np.abs(grad) == np.nan or np.abs(grad) < minchange):
# grad = delfactor * a
if(math.isnan(grad)):
print("asfd")
return grad
class Fitter:
def __init__(self, func, ds: dataset, figret: figret, fargs:[dataform],max_int:int=10000,show_error:bool=True,grad:bool=True,manual_mode:bool=False):
tem = []
for i in fargs:
tem.append(i.data)
self.iteration=0
self.dataform=fargs
self.args = tuple(tem)
self.ds = ds
self.func = func
self.figret = figret
self.errors = []
self.grad=grad
self.all_error=[]
self.reset_count = 0
self.resetmax=10
self.slider_changed=False
self.pkfound = False
self.error_count=0
self.oldargs = []
self.oldargs_byparameter=[]
self.slopes_byparameter=[]
self.slope_signchangecount=[]
self.slope_signchangecountmax=6
self.cur_par = 0
self.cur_par_count = 0
self.par_opt_len = 6
self.learning_rate = 0.001
self.org_learning_rate = 0.001
self.close_error=2
self.chi=-1
self.lcount=0
self.maxlcount=3
self.figret.ax.errorbar(ds.x, ds.y, yerr=ds.y_error, xerr=ds.x_error, c="C1", fmt='o',ms=3)
self.maxinter=max_int+1
self.errorplot=None
self.show_error=show_error
self.errorSlider=None
self.random_search=True
self.complete=False
self.updating_sliders = False
self.cur_err_par=0
self.pause=False
self.errorBetterCount=0
self.errorBetterCountMax=10
self.manual_mode = manual_mode
if(self.manual_mode != True):
self.ani = animation.FuncAnimation(figret.fig, self.update, interval=10, blit=False, repeat=False,
frames=self.maxinter)
axnext = self.figret.pyplt.axes([0.86, 0.25, 0.1, 0.075])
self.bpause = Button(axnext, 'Play/Pause')
axshowbesst = self.figret.pyplt.axes([0.86, 0.25 + 0.075, 0.1, 0.075])
self.showbest = Button(axshowbesst, 'Show Best')
self.slider_array = []
self.figret.pyplt.subplots_adjust(left=0.25, bottom=0.5)
self.buildSliders()
# self.figret.fig.canvas.mpl_connect('button_press_event', self.playpause)
self.bpause.on_clicked(self.playpause)
self.showbest.on_clicked(self.showBest)
def run(self):
self.figret.pyplt.show()
def buildSliders(self):
axcolor = 'lightgoldenrodyellow'
for j in range(len(self.dataform)):
df = self.dataform[j]
ax = self.figret.pyplt.axes([0.15, 0.40 - (0.03*(j+1)), 0.65, 0.03], facecolor=axcolor)
step=(df.max- df.min)/100
slider = Slider(ax, df.name, df.min, df.max, valinit=df.data, valstep=step)
slider.on_changed(self.updateSlider)
self.slider_array.append(slider)
def updateSlider(self,event):
self.slider_changed=True
if(self.manual_mode == True and self.updating_sliders == False):
self.update(self.iteration)
self.reset_count = 1
def showBest(self,event):
if(self.manual_mode != True):
if (self.pause == False):
self.playpause(event)
minarg = np.array(self.all_error).argmin()
self.arg = self.oldargs[minarg]
newds = self.func(*self.arg)
plots = self.figret.plots
tplots = plots[0]
# updaating sliders
for j in range(len(self.slider_array)):
self.slider_array[j].set_val(self.arg[j])
try:
if (len(tplots) > 0):
tplots[0].set_ydata(newds.y)
else:
plots[0].set_ydata(newds.y)
except Exception:
plots[0].set_ydata(newds.y)
elif(self.manual_mode == True):
self.run_finished()
minarg = np.array(self.all_error).argmin()
self.arg = self.oldargs[minarg]
newds = self.func(*self.arg)
plots = self.figret.plots
tplots = plots[0]
# updaating sliders
for j in range(len(self.slider_array)):
self.slider_array[j].set_val(self.arg[j])
try:
if (len(tplots) > 0):
tplots[0].set_ydata(newds.y)
else:
plots[0].set_ydata(newds.y)
except Exception:
plots[0].set_ydata(newds.y)
def playpause(self,event):
if(self.manual_mode != True):
if(self.pause==False):
self.ani.event_source.stop()
self.pause=True
else:
self.ani.event_source.start()
self.pause=False
else:
minarg = np.array(self.all_error).argmin()
self.arg = self.oldargs[minarg]
newds = self.func(*self.arg)
plots = self.figret.plots
tplots = plots[0]
# updaating sliders
for j in range(len(self.slider_array)):
self.slider_array[j].set_val(self.arg[j])
try:
if (len(tplots) > 0):
tplots[0].set_ydata(newds.y)
else:
plots[0].set_ydata(newds.y)
except Exception:
plots[0].set_ydata(newds.y)
def update(self, i):
self.iteration = i
f = self.func
args = self.args
if(self.slider_changed):
self.slider_changed=False
temp=[]
for j in self.slider_array:
temp.append(j.val)
args=tuple(temp)
# updaating sliders
self.args = args
#check for constraints
if(self.manual_mode != True):
for df in self.dataform:
dfcon = df.constraints
if(dfcon is not None):
for tc in dfcon:
tf = tc.f
tmax = tc.max
tmin = tc.min
targ = self.args
tv = tf(targ)
maxrecur =9000
count =0
while(tv>tmax or tv<tmin or count>maxrecur):
print("constraint hit")
print(count)
self.args = self.startNewPar(self.cur_par, curParam=False, completelyrandom=True)
targ = self.args
tv = tf(targ)
count +=1
# if (self.iteration >= 1):
# self.iteration -= 1
# else:
# self.iteration = 0
#gets newds with this iterations parameters
newds = f(*args)
#get error of newrun
er = self.errorCalc(newds)
#will use to update blueline later
update_blue=False
if(len(self.all_error)>1):
miner = min(self.all_error)
if(er<miner):
update_blue=True
# adds to current parameters errors
try:
self.errors[self.cur_par].append(er)
except Exception as e:
# extends errors to hold the current parameters.
# todo chang to if
self.errors.append([er])
if(self.grad==False):
# adds history of old arguments
try:
self.oldargs_byparameter[self.cur_par].append(self.args[self.cur_par])
except Exception as e:
# extends list of arguments
# todo change to if
self.oldargs_byparameter.append([self.args[self.cur_par]])
# adds to all error
self.all_error.append(er)
# adds args as tuple to history
self.oldargs.append(self.args)
if(self.manual_mode != True):
if(self.grad == False):
if (self.cur_par_count == 0):
if (len(self.all_error) > self.par_opt_len):
if (self.all_error[-1] > min(self.all_error[-self.par_opt_len:-1])):
tt = np.argmin(self.all_error[-self.par_opt_len:-1])
self.args = list(self.args)
self.args[self.cur_par] = self.oldargs[-(self.par_opt_len - tt)][self.cur_par]
self.args = tuple(self.args)
print("reverting set and slowing down previous")
cp = self.cur_par
cp = cp-1
if(cp<0):
cp=len(self.args)-1
if(self.dataform[cp].lrate>1e-3):
self.dataform[cp].lrate = self.dataform[cp].lrate * 0.5
self.cur_par_count+=1
return
if (self.manual_mode != True):
if(self.grad==False):
if(len(self.errors) == len(self.args)):
if(len(self.errors[self.cur_par])>5):
if(self.all_error[-1]>self.close_error):
if(np.abs((self.errors[self.cur_par][-1] - self.errors[self.cur_par][-2])/self.errors[self.cur_par][-1])<1e-1):
if(self.dataform[self.cur_par].lrate<1e3):
self.dataform[self.cur_par].lrate =self.dataform[self.cur_par].lrate*2
print("speed increased")
if (self.random_search):
if(self.reset_count==0):
if(np.abs((self.all_error[-1] - min(self.all_error))/min(self.all_error))>3):
self.args = self.oldargs[-2]
self.reset_count+=1
print("reverting random")
self.cur_par_count += 1
return
if (np.abs((self.all_error[-1] - min(self.all_error))/min(self.all_error)) >25):
minar = np.argmin(self.all_error)
self.args=self.oldargs[minar]
print("reverting terrible")
self.cur_par_count += 1
return
#Plots error
if(self.show_error and len(self.errors)== len(self.args)):
self.plotError(i)
newargs = list(args)
if (self.manual_mode != True):
#need at least two runs to start the process
if (self.errors!=None and len(self.errors[self.cur_par])>=2):
if(self.grad):
for k in range(len(args)):
self.update_param(k,newargs)
else:
newargs=list(args)
newargs = self.update_param(self.cur_par,newargs)
else:
#start random if we don't have enough to get a gradient
newargs = list(self.startNewPar(self.cur_par,curParam=True,center=True))
if(self.grad == False):
#checks if we need to move to a new parameter
if (self.cur_par_count >= self.par_opt_len):
self.cur_par += 1
self.cur_par_count = 0
if (self.cur_par > (len(args) - 1)):
self.cur_par = 0
####updating plot
self.args = newargs
if(update_blue):
plots = self.figret.plots
tplots = plots[0]
try:
if(len(tplots)>0):
tplots[0].set_ydata(newds.y)
else:
plots[0].set_ydata(newds.y)
except Exception:
plots[0].set_ydata(newds.y)
self.figret.ax.set_title("Iterataion: " + str(i) + " Current Parameter: " + str(self.cur_par) +" cur_par_count: "+str(self.cur_par_count)+ " Chi: " + str("{:.4f}".format(self.chi) ))
#updaating sliders
self.updating_sliders=True
for j in range(len(self.slider_array)):
self.slider_array[j].set_val(self.args[j])
self.updating_sliders = True
self.updating_sliders = False
self.slider_changed=False
#when finished
if(i>=self.maxinter-3):
print("run finished called")
self.run_finished()
def update_param(self,param,newargs):
done=False
close=False
k=param
args = self.args
tw = np.matrix(self.oldargs)
allerr = self.all_error
tw1 = tw[:, k]
tw2 = []
for j in tw1:
tw2.append(float(j[0]))
ter = self.all_error
if(min(ter)<=self.close_error):
close=True
grad = getGradient(y=ter, x=tw2)
try:
self.slopes_byparameter[k].append(grad)
except Exception as e:
# extends list of arguments
# todo change to if
self.slopes_byparameter.append([grad])
gfactor = grad * (self.dataform[k].lrate)
diff = self.dataform[k].max - self.dataform[k].min
if (np.abs(gfactor) > np.abs(diff / 10)):
gfactor = (gfactor / np.abs(gfactor)) * diff / 10
a = args[k] - gfactor
#todo make add_min param
add_min = 1e-4
if(self.dataform[k].min != 0):
add_min = self.dataform[k].min /100
if (a <= self.dataform[k].min):
a = self.dataform[k].min + add_min
if(self.dataform[self.cur_par].lrate<1e-3):
self.dataform[self.cur_par].lrate = self.dataform[self.cur_par].lrate * 0.5
print("barrier slowed")
elif (a >= self.dataform[k].max):
a = self.dataform[k].max - add_min
if (self.dataform[self.cur_par].lrate < 1e-3):
self.dataform[self.cur_par].lrate = self.dataform[self.cur_par].lrate * 0.5
print("barrier slowed")
if(close):
newargs[k] = a
if (len(self.slopes_byparameter) == len(args)):
for zz in range(len(args)):
if (len(self.slopes_byparameter[zz]) > 3):
s1 = self.slopes_byparameter[zz][-1]
s2 = self.slopes_byparameter[zz][-2]
if (np.abs(s1) != 0):
sign1 = s1 / np.abs(s1)
else:
sign1 = 0
if (np.abs(s2) != 0):
sign2 = s2 / np.abs(s2)
else:
sign2 = 0
if (len(self.slope_signchangecount) == zz):
self.slope_signchangecount.append(0)
if (sign1 != sign2 and sign1 != 0 and sign2 != 0):
try:
self.slope_signchangecount[zz] += 1
except Exception as e:
# extends errors to hold the current parameters.
# todo chang to if
self.slope_signchangecount.append(1)
else:
try:
self.slope_signchangecount[zz] = 0
except Exception as e:
# extends errors to hold the current parameters.
# todo chang to if
self.slope_signchangecount.append(0)
while (len(self.slope_signchangecount) < zz + 1):
self.slope_signchangecount.append(0)
if(len(self.errors[self.cur_par])>3):
if (self.slope_signchangecount[zz] > self.slope_signchangecountmax and min(self.all_error)< 10*self.close_error):
if(self.dataform[zz].lrate > 1e-2):
self.slope_signchangecount[zz] = 0
self.dataform[zz].lrate = self.dataform[zz].lrate * 0.5
print("slowed")
if(self.random_search):
if (len(tw2) > 2):
if (np.abs(tw2[-1] - tw2[-2]) < 1e-2 and np.abs(self.all_error[-1] - self.all_error[-2]) < 1e-2 and self.all_error[-1] < 10 and done == False):
if (self.reset_count < self.resetmax):
self.reset_count += 1
else:
self.reset_count = 0
a = self.startNewPar(curpar=k)[k]
# if (self.dataform[k].lrate < 1e2):
# self.dataform[k].lrate = self.dataform[k].lrate * 1.01
newargs[k] = a
self.cur_par_count += 1
print("newparam b " + str(self.iteration))
done = True
if (self.all_error[-1] > min(self.all_error) * 10 and done == False and close==False):
if (self.reset_count < self.resetmax):
self.reset_count += 1
else:
self.reset_count = 0
cen = False
if (self.all_error[-1] > 10):
cen = True
a = self.startNewPar(curpar=k, center=cen)[k]
self.cur_par_count += 1
print("newparam a "+str(self.iteration))
newargs[k] = a
done=True
if (len(tw2) > 3):
if (np.abs((tw2[-1] - tw2[-2])) < 1e-4 and np.abs((self.all_error[-1] - self.all_error[-2])/self.all_error[-2])<5e-4):
if (self.dataform[k].lrate < 1e2):
self.dataform[k].lrate = self.dataform[k].lrate * 1.5
print("speeding up")
self.cur_par_count += 1
newargs[k] = a
if(self.random_search):
if (len(allerr) > 10):
avg10b = np.average(allerr[-10:-5])
avg5b = np.average(allerr[-5:-1])
stdev5b = np.std(allerr[-5:-1])
max5b = max(allerr[-5:-1])
if ( avg5b > 10 and stdev5b < max5b /20 and done==False):
if (self.reset_count < self.resetmax):
self.reset_count += 1
else:
self.reset_count = 0
if ((avg10b + avg5b) < min(allerr) * 2):
newargs = self.startNewPar(curpar=k, center=False, curParam=False)
print("newargs a avg10b=" + str(avg10b) + " avg5b=" + str(avg5b) + " min:" + str(min(allerr)) +" "+str(self.iteration))
else:
if(close==False):
newargs = self.startNewPar(curpar=k, center=True, curParam=False)
print("newargs b "+str(self.iteration))
return newargs
def run_finished(self):
if(self.manual_mode!=True):
self.showBest(None)
self.figret.pyplt.close('all')
print("Finished")
self.complete = True
def isErrorBetterThanOtherError(self):
self.errorBetterCount+=1
numofpar = len(self.args)
lserdiff = np.abs(self.errors[self.cur_par][-1] - self.errors[self.cur_par][-2])
if(len(self.errors)==len(self.args)):
ret = True
for i in range(numofpar):
if(i == self.cur_par):
continue
terdiff = np.abs(self.errors[i][-1] - self.errors[i][-2])
if(terdiff<lserdiff):
ret = False
break
else:
ret = False
if(ret==True and self.errorBetterCount>self.errorBetterCountMax):
self.errorBetterCount=0
ret = False
return ret
def startNewPar(self,curpar, center=False,curParam=True,completelyrandom=False):
args = self.args
newargs = []
for i in range(len(args)):
amin = self.dataform[i].min
amax = self.dataform[i].max
uselog = False
top = amax-amin
top = top+1
bottom = 1
if(np.abs(top/bottom)>100):
uselog =True
if(curParam==True):
if(completelyrandom == False):
if i == curpar:
a = args[self.cur_par]
minerind=np.argmin(self.all_error)
matr = np.matrix(self.oldargs)
bestarg = matr[minerind,i]
mu, sigma = bestarg, np.abs(amax-amin)/10 # mean and standard deviation
if (center and min(self.all_error)>self.close_error):
sigma=np.abs(amax-amin)/100
mu = (np.abs(amax - amin)/2)+ amin
elif(min(self.all_error)<self.close_error):
sigma=np.abs(amax-amin)/10000
s = np.random.normal(mu, sigma, 1)
if(s<amin):
s=amin
elif(s>amax):
s=amax
newargs.append(float(s))
else:
newargs.append(float(args[i]))
else:
if i == curpar:
if (uselog):
r = np.random.uniform(np.log10(amin), np.log10(amax))
newargs.append(float(10 ** r))
else:
r = np.random.uniform(amin, amax)
newargs.append(float(r))
else:
newargs.append(float(args[i]))
else:
if(completelyrandom==False):
if(len(self.all_error)>10):
a = args[i]
minerind = np.argmin(self.all_error)
matr = np.matrix(self.oldargs)
bestarg = matr[minerind, i]
mu, sigma = bestarg, np.sqrt(amax - amin) / 10 # mean and standard deviation
else:
center=True
if (center):
sigma =np.sqrt(amax - amin) / 10
mu = (np.abs(amax - amin) / 2) + amin
s = np.random.normal(mu, sigma, 1)
if (s < amin):
s = amin
elif (s > amax):
s = amax
newargs.append(float(s))
else:
if(uselog):
r = np.random.uniform(np.log10(amin), np.log10(amax))
newargs.append(float(10**r))
else:
r = np.random.uniform(amin,amax)
newargs.append(float(r))
return tuple(newargs)
def argMaxMinCheck(self,v):
amin = self.dataform[self.cur_par].min
amax = self.dataform[self.cur_par].max
# if(type(v) is tuple):
# print("asdf")
# if (v < amin):
# print("asdf")
# elif (v > amax):
# print("asdf")
# def checkErrorAndCount(self):
# if(self.)
def errorCalc(self, newds: dataset):
y = newds.y
x = newds.x
cy = list(self.ds.y)
cx = list(self.ds.x)
xer = None
yer = None
if(self.ds.x_error is not None):
xer = list(self.ds.x_error)
if (self.ds.y_error is not None):
yer = list(self.ds.y_error)
yobs=[]
xobs = []
for tt in range(len(cy)):
tval,tind = find_nearest(x,cx[tt])
yobs.append(y[tind])
xobs.append(tval)
chi2 = self.chi2(yobs=yobs,yexp=cy,xobs=xobs,xexp=cx,x_error=xer,y_error=yer)
error = chi2
self.chi = chi2
if len(self.figret.plots)<2:
plotty = self.figret.ax.scatter(xobs, yobs, c="C2")
self.figret.plots.append(plotty)
else:
self.figret.plots[1].set_offsets(np.c_[xobs,yobs])
self.figret.fig.canvas.draw()
if(math.isnan(error)):
print("Error is NAN")
return error
def chi2(self,yobs,yexp,xobs=None,xexp=None,y_error=None,x_error=None):
r=0
islog=False
if(np.std(yobs)>100 or np.std(xobs)>100):
            yobs = list(np.log10(np.array(yobs)))
import onnxruntime_numpy as onp
from onnxruntime_numpy.types import float_types
import numpy as np
import pytest
from .utils import expect
def gemm_reference_implementation(A, B, C=None, alpha=1., beta=1., transA=0,
transB=0):
A = A if transA == 0 else A.T
B = B if transB == 0 else B.T
C = C if C is not None else np.array(0)
Y = alpha * np.dot(A, B) + beta * C
return Y
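# Small numeric sketch (illustrative, not an actual test case): the reference
# implementation computes Y = alpha * op(A) . op(B) + beta * C, where op()
# transposes its argument when the corresponding trans flag is set.
def _example_gemm_reference():
    A = np.array([[1., 2.], [3., 4.]])
    B = np.eye(2)
    C = np.ones((2, 2))
    # 2 * A.T @ B + 0.5 * C -> [[2.5, 6.5], [4.5, 8.5]]
    return gemm_reference_implementation(A, B, C, alpha=2., beta=0.5, transA=1)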
@pytest.mark.parametrize("type_a", [*float_types])
def test_gemm_all_attribute(type_a):
a = np.random.ranf([4, 3]).astype(type_a)
b = np.random.ranf([5, 4]).astype(type_a)
c = np.random.ranf([1, 5]).astype(type_a)
expected = gemm_reference_implementation(
a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35)
result = onp.gemm(onp.array(a, dtype=type_a),
onp.array(b, dtype=type_a),
onp.array(c, dtype=type_a),
transA=True,
transB=True,
alpha=0.25,
beta=0.35)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types])
def test_gemm_alpha(type_a):
a = np.random.ranf([3, 5]).astype(type_a)
b = np.random.ranf([5, 4]).astype(type_a)
c = np.zeros([1, 4]).astype(type_a)
expected = gemm_reference_implementation(a, b, c, alpha=0.5)
result = onp.gemm(onp.array(a, dtype=type_a),
onp.array(b, dtype=type_a),
onp.array(c, dtype=type_a),
alpha=0.5)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types])
def test_gemm_beta(type_a):
a = np.random.ranf([3, 5]).astype(type_a)
b = np.random.ranf([5, 4]).astype(type_a)
c = np.zeros([1, 4]).astype(type_a)
expected = gemm_reference_implementation(a, b, c, beta=0.5)
result = onp.gemm(onp.array(a, dtype=type_a),
onp.array(b, dtype=type_a),
onp.array(c, dtype=type_a),
beta=0.5)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types])
def test_gemm_default_matrix_bias(type_a):
a = np.random.ranf([3, 6]).astype(type_a)
b = np.random.ranf([6, 4]).astype(type_a)
c = np.random.ranf([3, 4]).astype(type_a)
expected = gemm_reference_implementation(a, b, c)
result = onp.gemm(onp.array(a, dtype=type_a),
onp.array(b, dtype=type_a),
onp.array(c, dtype=type_a))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types])
def test_gemm_default_no_bias(type_a):
a = np.random.ranf([2, 10]).astype(type_a)
b = np.random.ranf([10, 3]).astype(type_a)
expected = gemm_reference_implementation(a, b)
result = onp.gemm(onp.array(a, dtype=type_a),
onp.array(b, dtype=type_a))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types])
def test_gemm_default_scalar_bias(type_a):
    a = np.random.ranf([2, 10]).astype(type_a)
from hypothesis import given
import numpy as np
from functools import reduce
import utils_hypothesis
@given(shapes=utils_hypothesis.get_boardcastable_arrays_shapes())
def test_get_boardcastable_arrays_shapes(shapes):
arrays = [np.ones(shape) for shape in shapes]
reduce(np.add, arrays)
@given(shapes_indicies=utils_hypothesis.get_boardcastable_arrays_shapes_and_indices())
def test_get_boardcastable_arrays_shapes_and_indices(shapes_indicies):
shapes, indices, shape = shapes_indicies
arrays = [np.ones(shape) for shape in shapes]
array = reduce(np.add, arrays)
_ = array[indices]
np.testing.assert_array_equal(shape, array.shape)
@given(shapes=utils_hypothesis.get_chainable_array_shapes())
def test_get_chainable_array_shapes(shapes):
arrays = [np.ones(shape) if len(shape) == 2 else np.diag(np.ones(shape)) for shape in shapes]
reduce(np.dot, arrays)
@given(shapes_indicies=utils_hypothesis.get_chainable_arrays_shapes_and_indices())
def test_get_chainable_array_shapes_and_indicies(shapes_indicies):
shapes, indices, shape = shapes_indicies
arrays = [np.ones(shape) if len(shape) == 2 else np.diag(np.ones(shape)) for shape in shapes]
array = reduce(np.dot, arrays)
np.testing.assert_array_equal(array.shape, shape)
array[indices]
@given(n_index_axis=utils_hypothesis.get_vector_index_axis())
def test_get_vector_index_axis(n_index_axis):
n, index, axis = n_index_axis
a = np.diag(np.ones(n))
    np.testing.assert_array_equal(a[index, :], a[:, index].T)
import numpy as np
import cv2
from scipy import signal
from scipy import misc
from scipy.ndimage import binary_opening
import skimage
from skimage.transform import hough_line, hough_line_peaks
from skimage.feature import canny
from skimage.draw import line
from skimage import data
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from matplotlib import cm
scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
[-10+0j, 0+ 0j, +10 +0j],
[ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
sobel = np.asarray([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])
sobel_x = np.array([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]], dtype=np.float)
sobel_y = np.array([[1, 2, 1],
[0, 0, 0],
[-1,-2,-1]], dtype = np.float)
def remove_diagonal(X):
X_ = X.copy()
n = X.shape[0]
for i in range(-30, 30):
x = range(n)
y = [x_ + i for x_ in x]
if i != 0:
x = x[abs(i):-abs(i)]
y = y[abs(i):-abs(i)]
X_[x,y] = 0
return X_
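# Illustrative sketch: remove_diagonal zeroes the main diagonal and a band of
# nearby off-diagonals (roughly +/- 30), e.g. to suppress the trivial
# self-match band in a self-similarity matrix.
def _example_remove_diagonal():
    X = np.ones((100, 100))
    X_ = remove_diagonal(X)
    return X_[50, 50], X_[50, 51], X_[50, 85]  # (0.0, 0.0, 1.0)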
def convolve_array(X, cfilter=scharr):
grad = signal.convolve2d(X, cfilter, boundary='symm', mode='same')
X_conv = np.absolute(grad)
return X_conv
def convolve_array_sobel(X, cfilter=sobel):
grad_x = signal.convolve2d(X, sobel_x, boundary='symm', mode='same')
grad_y = signal.convolve2d(X, sobel_y, boundary='symm', mode='same')
grad = np.hypot(grad_x, grad_y)
X_conv = np.absolute(grad)
return X_conv
def convolve_array_tile(X, cfilter=sobel, divisor=49):
"""
Iteratively convolve equal sized tiles in X, rejoining for fast convolution of the whole
"""
x_height, x_width = X.shape
assert x_height == x_width, "convolve_array expects square matrix"
# Find even split for array
divisor = divisor
tile_height = None
while (not tile_height) or (int(tile_height) != tile_height):
# iterate divisor until whole number is found
divisor += 1
tile_height = x_height / divisor
tile_height = int(tile_height)
# Get list of tiles
tiled_array = X.reshape(divisor, tile_height, -1, tile_height)\
.swapaxes(1, 2)\
.reshape(-1, tile_height, tile_height)
# Convolve tiles iteratively
tiled_array_conv = np.array([convolve_array(x, cfilter=cfilter) for x in tiled_array])
# Reconstruct original array using convolved tiles
X_conv = tiled_array_conv.reshape(divisor, divisor, tile_height, tile_height)\
.swapaxes(1, 2)\
.reshape(x_height, x_width)
return X_conv
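# Shape sketch (illustrative): tiling, convolving and reassembling preserves
# the input shape, e.g. a 300x300 array ends up split into 50x50 tiles once
# the divisor search settles on 6 equal splits.
def _example_convolve_array_tile():
    X = np.random.rand(300, 300)
    X_conv = convolve_array_tile(X, cfilter=sobel, divisor=5)
    return X.shape == X_conv.shape  # True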
def binarize(X, bin_thresh, filename=None):
X_bin = X.copy()
X_bin[X_bin < bin_thresh] = 0
X_bin[X_bin >= bin_thresh] = 1
if filename:
skimage.io.imsave(filename, X_bin)
return X_bin
def diagonal_gaussian(X, gauss_sigma, filename=False):
d = X.shape[0]
X_gauss = X.copy()
diag_indices_x, diag_indices_y = np.diag_indices_from(X_gauss)
for i in range(1,d):
        diy = np.append(diag_indices_y, diag_indices_y[:i])
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import applications
from keras import backend as K
#from keras_squeezenet import SqueezeNet
import tensorflow as tf
from influence import Influence, plot_mnist
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.preprocessing import normalize
def retrain_inception_on_flowers(seed=22, verbose=True, new_training_data=False):
# dimensions of our images.
img_width, img_height = 299, 299
nb_train_samples = 1000
nb_validation_samples = 200
epochs = 10
batch_size = 20
tf.set_random_seed(seed)
np.random.seed(seed)
train_shuffle_idx = np.random.permutation(nb_train_samples)
train_data = np.load('data/flower_photos/bottleneck_features_train-inception.npy')[train_shuffle_idx]
train_labels = np.array([0] * int(nb_train_samples / 2) + [1] * int(nb_train_samples / 2)).reshape(-1,1)[train_shuffle_idx]
valid_shuffle_idx = np.random.permutation(nb_validation_samples)
validation_data = np.load('data/flower_photos/bottleneck_features_validation-inception.npy')[valid_shuffle_idx]
validation_labels = np.array([0] * int(nb_validation_samples / 2) + [1] * int(nb_validation_samples / 2)).reshape(-1,1)[valid_shuffle_idx]
if new_training_data:
il = ImageLoader()
train_data = np.zeros((nb_train_samples, 2048))
for i in range(nb_train_samples):
if (i%100==0):
print("Regenerating training data:",i)
pic, feature = il.load_candidate_image(train_shuffle_idx[i], img_folder='train', dataset='flowers', model='Inception')
train_data[i] = feature
validation_data = np.zeros((nb_validation_samples, 2048))
for i in range(nb_validation_samples):
if (i%100==0):
print("Regenerating validation data:",i)
pic, feature = il.load_candidate_image(valid_shuffle_idx[i], img_folder='validation', dataset='flowers', model='Inception')
validation_data[i] = feature
tf.reset_default_graph()
tf.set_random_seed(seed)
g = tf.Graph()
with g.as_default():
tf.set_random_seed(seed)
X = tf.placeholder(tf.float32, shape=(None, 2048))
W_flat = tf.get_variable("all_weights",[2049])
W = tf.reshape(W_flat[0:2048], [2048, 1])
b = tf.reshape(W_flat[2048], [1])
y = tf.nn.sigmoid(tf.matmul(X, W) + b)
y_ = tf.placeholder(tf.float32, [None, 1],name="y_")
cross_entropy = tf.reduce_sum(tf.squared_difference(y_, y)) #yes, I know this isn't cross entropy...
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
correct_prediction = tf.equal(tf.round(y), y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.Session(graph=g)
np.random.seed(seed)
tf.set_random_seed(seed)
sess.run(tf.global_variables_initializer())
for j in range(epochs):
for i in range(10):
batch_xs = train_data[100*i:100*(i+1)]
batch_ys = train_labels[100*i:100*(i+1)]
_, acc, cp = sess.run([train_step, accuracy, correct_prediction], feed_dict={X: batch_xs, y_: batch_ys})
if verbose:
print('Epoch '+str(j) + ', Batch Acc=' + str(acc))
print("Test Accuracy:",sess.run(accuracy, feed_dict={X: validation_data,y_: validation_labels}))
return g, sess, W_flat, cross_entropy, X, y, y_, train_data, train_labels, train_shuffle_idx, valid_shuffle_idx
def retrain_VGG_on_flowers(seed=22, verbose=True):
global train_shuffle_idx, valid_shuffle_idx
np.random.seed(seed)
label_names = ['Cat','Dog']
# dimensions of our images.
img_width, img_height = 150, 150
top_model_weights_path = 'vgg16_weights.h5'
nb_train_samples = 1000
nb_validation_samples = 200
epochs = 10
batch_size = 20
train_shuffle_idx = np.random.permutation(nb_train_samples)
train_data = np.load('data/flower_photos/bottleneck_features_train-vgg.npy').reshape(-1,4*4*512)[train_shuffle_idx]
train_labels = np.array([0] * int(nb_train_samples / 2) + [1] * int(nb_train_samples / 2)).reshape(-1,1)[train_shuffle_idx]
valid_shuffle_idx = np.random.permutation(nb_validation_samples)
validation_data = np.load('data/flower_photos/bottleneck_features_validation-vgg.npy').reshape(-1,4*4*512)[valid_shuffle_idx]
validation_labels = np.array([0] * int(nb_validation_samples / 2) + [1] * int(nb_validation_samples / 2)).reshape(-1,1)[valid_shuffle_idx]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, 4*4*512))
W_flat = tf.Variable(tf.zeros([train_data.shape[1]+1]),name="W_flat")
W = tf.reshape(W_flat[:-1], [train_data.shape[1], 1])
b = W_flat[-1]
y = tf.nn.sigmoid(tf.matmul(X, W) + b)
y_ = tf.placeholder(tf.float32, [None, 1],name="y_")
cross_entropy = tf.reduce_sum(tf.squared_difference(y_, y)) #yes, I know this isn't cross entropy...
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
correct_prediction = tf.equal(tf.round(y), y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for j in range(epochs):
for i in range(10):
batch_xs = train_data[100*i:100*(i+1)]
batch_ys = train_labels[100*i:100*(i+1)]
_, acc, cp = sess.run([train_step, accuracy, correct_prediction], feed_dict={X: batch_xs, y_: batch_ys})
if verbose:
print('Epoch '+str(j) + ', Batch Acc=' + str(acc))
print("Test Accuracy:",sess.run(accuracy, feed_dict={X: validation_data,y_: validation_labels}))
return sess, W_flat, cross_entropy, X, y, y_, train_data, train_labels, train_shuffle_idx, valid_shuffle_idx
def train_squeeze_on_cats_dogs(seed=43, verbose=True):
np.random.seed(seed)
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 10
batch_size = 16
train_shuffle_idx = np.random.permutation(nb_train_samples)
train_data = np.load('bottleneck_features_train-squeeze.npy').reshape(-1,13*13*512)[train_shuffle_idx]
train_labels = np.array([0] * int(nb_train_samples / 2) + [1] * int(nb_train_samples / 2)).reshape(-1,1)[train_shuffle_idx]
valid_shuffle_idx = np.random.permutation(nb_validation_samples)
validation_data = np.load('bottleneck_features_validation-squeeze.npy').reshape(-1,13*13*512)[valid_shuffle_idx]
validation_labels = np.array([0] * int(nb_validation_samples / 2) + [1] * int(nb_validation_samples / 2)).reshape(-1,1)[valid_shuffle_idx]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, 13*13*512))
W_flat = tf.get_variable("all_weights",[1029])
W_conv = tf.reshape(W_flat[:1024],[1, 1, 512, 2])
b_conv = tf.reshape(W_flat[1024:1026], [2])
W = tf.reshape(W_flat[-3:-1], [2, 1])
b = W_flat[-1]
conv = tf.reshape(X, [-1,13,13,512])
conv = tf.nn.conv2d(conv, W_conv, strides=[1,1,1,1], padding='VALID')
conv = tf.nn.bias_add(conv, b_conv)
conv = tf.nn.sigmoid(conv)
out = tf.nn.avg_pool(conv, (1, 13, 13, 1), (1,1,1,1), padding='VALID')
out = tf.reshape(out, [-1, 2])
y = tf.nn.sigmoid(tf.matmul(out, W) + b)
y_ = tf.placeholder(tf.float32, [None, 1],name="y_")
cross_entropy = tf.reduce_sum(tf.squared_difference(y_, y)) #yes, I know this isn't cross entropy...
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
correct_prediction = tf.equal(tf.round(y), y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for j in range(epochs):
for i in range(20):
batch_xs = train_data[100*i:100*(i+1)]
batch_ys = train_labels[100*i:100*(i+1)]
_, acc, cp = sess.run([train_step, accuracy, cross_entropy], feed_dict={X: batch_xs, y_: batch_ys})
if (j%20==0):
if (verbose):
print('Epoch '+str(j) + ', Batch Acc=' + str(acc))
print('Loss '+str(cp))
print("Test Accuracy:",sess.run(accuracy, feed_dict={X: validation_data,y_: validation_labels}))
print("Final Test Accuracy:",sess.run(accuracy, feed_dict={X: validation_data,y_: validation_labels}))
return sess, W_flat, cross_entropy, X, y, y_, train_data, train_labels, train_shuffle_idx, valid_shuffle_idx
def train_squeeze_on_flowers(seed=43, verbose=True):
np.random.seed(seed)
train_data_dir = 'data/flower_photos/train'
validation_data_dir = 'data/flower_photos/validation'
nb_train_samples = 1000
nb_validation_samples = 200
epochs = 240
batch_size = 20
train_shuffle_idx = np.random.permutation(nb_train_samples)
train_data = np.load('data/flower_photos/bottleneck_features_train-squeeze.npy').reshape(-1,13*13*512)[train_shuffle_idx]
train_labels = np.array([0] * int(nb_train_samples / 2) + [1] * int(nb_train_samples / 2)).reshape(-1,1)[train_shuffle_idx]
valid_shuffle_idx = np.random.permutation(nb_validation_samples)
validation_data = np.load('data/flower_photos/bottleneck_features_validation-squeeze.npy').reshape(-1,13*13*512)[valid_shuffle_idx]
validation_labels = np.array([0] * int(nb_validation_samples / 2) + [1] * int(nb_validation_samples / 2)).reshape(-1,1)[valid_shuffle_idx]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, 13*13*512))
W_flat = tf.get_variable("all_weights",[1029])
W_conv = tf.reshape(W_flat[:1024],[1, 1, 512, 2])
b_conv = tf.reshape(W_flat[1024:1026], [2])
W = tf.reshape(W_flat[-3:-1], [2, 1])
b = W_flat[-1]
conv = tf.reshape(X, [-1,13,13,512])
conv = tf.nn.conv2d(conv, W_conv, strides=[1,1,1,1], padding='VALID')
conv = tf.nn.bias_add(conv, b_conv)
conv = tf.nn.sigmoid(conv)
out = tf.nn.avg_pool(conv, (1, 13, 13, 1), (1,1,1,1), padding='VALID')
out = tf.reshape(out, [-1, 2])
y = tf.nn.sigmoid(tf.matmul(out, W) + b)
y_ = tf.placeholder(tf.float32, [None, 1],name="y_")
cross_entropy = tf.reduce_sum(tf.squared_difference(y_, y)) #yes, I know this isn't cross entropy...
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
correct_prediction = tf.equal(tf.round(y), y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for j in range(epochs):
for i in range(10):
batch_xs = train_data[100*i:100*(i+1)]
batch_ys = train_labels[100*i:100*(i+1)]
_, acc, cp = sess.run([train_step, accuracy, cross_entropy], feed_dict={X: batch_xs, y_: batch_ys})
if (j%20==0):
if (verbose):
print('Epoch '+str(j) + ', Batch Acc=' + str(acc))
print('Loss '+str(cp))
print("Test Accuracy:",sess.run(accuracy, feed_dict={X: validation_data,y_: validation_labels}))
print("Final Test Accuracy:",sess.run(accuracy, feed_dict={X: validation_data,y_: validation_labels}))
return sess, W_flat, cross_entropy, X, y, y_, train_data, train_labels, train_shuffle_idx, valid_shuffle_idx
def train_convnet_on_mnist_fashion(seed=43, verbose=True):
mnist = input_data.read_data_sets('data/fashion',one_hot=False)
X_train, y_train = mnist.train.next_batch(10000)
idx = np.where(y_train>7)[0][:1500]
X_train = X_train[idx]; y_train = y_train[idx].reshape(-1,1)-8
X_test, y_test = mnist.test.next_batch(1000)
idx = np.where(y_test>7)[0]
X_test = X_test[idx]; y_test = y_test[idx].reshape(-1,1)-8
np.random.seed(seed)
tf.reset_default_graph()
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.sigmoid(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Placeholders
n_w_conv = 5*5*16
n_b_conv = 16
n_mult = 14*14*16
n_bias = 1
lengths = [n_w_conv, n_b_conv, n_mult, n_bias]
idx = np.cumsum(lengths)
W_flat = tf.Variable(tf.zeros([idx[-1]]),name="W_flat") #total number of weights
i = 0
W_conv = tf.reshape(W_flat[0:idx[i]],[5,5,1,16]);
b_conv = tf.reshape(W_flat[idx[i]:idx[i+1]],[16]); i += 1
W_fc = tf.reshape(W_flat[idx[i]:idx[i+1]],[14*14*16, 1]); i += 1
b_fc = tf.reshape(W_flat[idx[i]:],[1]);
x = tf.placeholder(tf.float32, [None, X_train.shape[1]],name="x")
conv1a = tf.reshape(x, shape=[-1, 28, 28, 1])
conv1b = conv2d(conv1a, W_conv, b_conv)
conv1c = maxpool2d(conv1b, k=2)
conv1d = tf.reshape(conv1c, [-1, n_mult])
y = tf.nn.sigmoid(tf.matmul(conv1d, W_fc) + b_fc)
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 1],name="y_")
cross_entropy = tf.reduce_sum(tf.squared_difference(y_, y)) #yes, I know this isn't cross entropy...
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
correct_prediction = tf.equal(tf.round(y), y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
# Train
for _ in range(10):
for i in range(15):
batch_xs = X_train[100*i:100*(i+1)]
batch_ys = y_train[100*i:100*(i+1)]
_, acc = sess.run([train_step, accuracy], feed_dict={x: batch_xs, y_: batch_ys})
print('\r',end=str(i)+' '+str(acc)+' ')
batch_xs, batch_ys = mnist.test.next_batch(1000)
print("\nTest Accuracy:",sess.run(accuracy, feed_dict={x: X_test,y_: y_test}))
return sess, W_flat, cross_entropy, x, y, y_, X_train, y_train
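# The training functions above pack every trainable parameter into one flat vector
# (W_flat) and slice/reshape it back into conv kernels, biases and FC weights. The
# sketch below is an illustrative, NumPy-only recreation of that packing scheme for
# the layout used in train_convnet_on_mnist_fashion; the helper name is made up for
# demonstration and the function is never called by this script.
def _demo_flat_weight_packing():
    import numpy as np
    # same layout as above: conv kernel, conv bias, FC weight, FC bias
    lengths = [5 * 5 * 1 * 16, 16, 14 * 14 * 16, 1]
    idx = np.cumsum(lengths)
    w_flat = np.zeros(idx[-1], dtype=np.float32)            # stand-in for the tf Variable
    w_conv = w_flat[0:idx[0]].reshape(5, 5, 1, 16)           # conv kernel
    b_conv = w_flat[idx[0]:idx[1]].reshape(16)               # conv bias
    w_fc = w_flat[idx[1]:idx[2]].reshape(14 * 14 * 16, 1)    # fully connected weight
    b_fc = w_flat[idx[2]:].reshape(1)                        # fully connected bias
    # every slice references the same flat buffer, so perturbing w_flat perturbs all parameters
    return w_conv.shape, b_conv.shape, w_fc.shape, b_fc.shape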
def retrain_VGG_on_cats_dogs(seed=22, verbose=True):
global train_shuffle_idx, valid_shuffle_idx
np.random.seed(seed)
label_names = ['Cat','Dog']
# dimensions of our images.
img_width, img_height = 150, 150
top_model_weights_path = 'vgg16_weights.h5'
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 10
batch_size = 16
train_shuffle_idx = np.random.permutation(nb_train_samples)
train_data = np.load('bottleneck_features_train.npy').reshape(-1,4*4*512)[train_shuffle_idx]
train_labels = np.array([0] * int(nb_train_samples / 2) + [1] * int(nb_train_samples / 2)).reshape(-1,1)[train_shuffle_idx]
valid_shuffle_idx = np.random.permutation(nb_validation_samples)
validation_data = np.load('bottleneck_features_validation.npy').reshape(-1,4*4*512)[valid_shuffle_idx]
validation_labels = np.array([0] * int(nb_validation_samples / 2) + [1] * int(nb_validation_samples / 2)).reshape(-1,1)[valid_shuffle_idx]
tf.reset_default_graph()
g = tf.Graph()
with g.as_default():
X = tf.placeholder(tf.float32, shape=(None, 4*4*512))
W_flat = tf.Variable(tf.zeros([train_data.shape[1]+1]),name="W_flat")
W = tf.reshape(W_flat[:-1], [train_data.shape[1], 1])
b = W_flat[-1]
y = tf.nn.sigmoid(tf.matmul(X, W) + b)
y_ = tf.placeholder(tf.float32, [None, 1],name="y_")
cross_entropy = tf.reduce_sum(tf.squared_difference(y_, y)) #yes, I know this isn't cross entropy...
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
correct_prediction = tf.equal(tf.round(y), y_)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for j in range(epochs):
for i in range(20):
batch_xs = train_data[100*i:100*(i+1)]
batch_ys = train_labels[100*i:100*(i+1)]
_, acc, cp = sess.run([train_step, accuracy, correct_prediction], feed_dict={X: batch_xs, y_: batch_ys})
if verbose:
print('Epoch '+str(j) + ', Batch Acc=' + str(acc))
print("Test Accuracy:",sess.run(accuracy, feed_dict={X: validation_data,y_: validation_labels}))
return g, sess, W_flat, cross_entropy, X, y, y_, train_data, train_labels, train_shuffle_idx, valid_shuffle_idx
class ImageLoader():
def __init__(self, model_name='Inception'):
K.clear_session()
if model_name=='Inception':
base_model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet')
out = base_model.output
out = GlobalAveragePooling2D()(out)
self.model = Model(inputs=base_model.input, outputs=out)
elif model_name=='VGG':
self.model = applications.VGG16(include_top=False, weights='imagenet')
else:
raise ValueError("Invalid model")
self.session = K.get_session()
def load_candidate_image(self, idx,img_folder='validation', noise=0, mode='gaussian', dataset='cats_dogs', model='VGG', array_of_pics=False):
from skimage.util import random_noise
from PIL import Image
if dataset=='cats_dogs':
filenames_train = np.load('filenames_train.npy')
filenames_validation = np.load('filenames_validation.npy')
img_width, img_height = 150, 150
root_path = 'data/'
size = [1,8192]
model = 'VGG'
print('Using VGG Model')
elif dataset=='flowers' and model=='Squeeze':
filenames_train = np.load('data/flower_photos/filenames_train-squeeze.npy')
filenames_validation = np.load('data/flower_photos/filenames_validation-squeeze.npy')
img_width, img_height = 227, 227
root_path = 'data/flower_photos/'
size = [1,13,13,512]
elif dataset=='flowers' and model=='Inception':
filenames_train = np.load('data/flower_photos/filenames_train-inception.npy')
filenames_validation = np.load('data/flower_photos/filenames_validation-inception.npy')
img_width, img_height = 227, 227
root_path = 'data/flower_photos/'
size = [1,2048]
else:
raise ValueError("Invalid dataset or model")
if (array_of_pics):
padding = 10
paths = [root_path+'train'+"/"+filenames_train[i] for i in idx]
images = [Image.open(p).resize((img_width, img_height)) for p in paths]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)+2*padding
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height),"white")
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]+padding
return new_im
else:
if (img_folder=='train'):
filename = filenames_train[idx]
elif (img_folder=='validation'):
filename = filenames_validation[idx]
else:
                raise ValueError("Invalid argument for parameter: folder")
#print("Opening file:"+root_path+img_folder+"/"+filename)
pic = Image.open(root_path+img_folder+"/"+filename)
pic = pic.resize((img_width, img_height))
pic = np.array(pic)/255
#pic = np.array(pic)
if (mode=='binary'):
pic += np.random.randint(2, size=pic.shape)*noise
pic = np.minimum(np.maximum(pic,0),1)
else:
pic = random_noise(pic, mode=mode, var=noise, clip=True)
pic = pic.reshape(-1,img_width, img_height, 3)
features = self.get_bottleneck_representation(pic,model_name=model)
return pic.squeeze(), features.reshape(size)
def pop_layer(self, model):
if not model.outputs:
raise Exception('Sequential model cannot be popped: model is empty.')
model.layers.pop()
if not model.layers:
model.outputs = []
model.inbound_nodes = []
model.outbound_nodes = []
else:
model.layers[-1].outbound_nodes = []
model.outputs = [model.layers[-1].output]
model.built = False
def get_bottleneck_representation(self, pic, model_name='VGG'):
from keras.preprocessing.image import ImageDataGenerator
img_width, img_height = 299, 299
datagen = ImageDataGenerator(rescale=1. / 255)
K.set_session(self.session)
#features = self.model.predict(pic)
for x_batch in datagen.flow(pic, None, batch_size=1):
#print(x_batch.shape)
features = self.model.predict(x_batch*255)
#print(np.allclose(x_batch, pic))
#print(np.allclose(x_batch, pic/255))
break
K.clear_session()
return features
#sess2 = tf.Session()
#if (model_name=='VGG'):
# model = applications.VGG16(include_top=False, weights='imagenet')
#if (model_name=='Inception'):
# base_model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet')
# out = base_model.output
# out = GlobalAveragePooling2D()(out)
# model = Model(inputs=base_model.input, outputs=out)
#elif (model_name=='Squeeze'):
# model = SqueezeNet(include_top=False) #the include_top doesn't actually seem to do anything
# pop_layer(model)
# pop_layer(model)
# pop_layer(model)
# pop_layer(model)
#else:
# raise ValueError("Invalid argument for paraemter: model_name")
def pgti(self,grads, pic, model='Inception'):
from keras import backend as K
K.set_session(self.session)
#K.set_learning_phase(1)
pic = pic.reshape(1, 227, 227, 3)
grads = grads.reshape(2048)
with K.get_session().graph.as_default():
gradient_wrt_features = tf.placeholder(tf.float32, shape=[2048])
base_model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet')
out = base_model.output
out = GlobalAveragePooling2D()(out)
model = Model(inputs=base_model.input, outputs=out)
flattened_features = tf.reshape(model.output, [-1])
product_of_gradient_and_features = tf.multiply(gradient_wrt_features, flattened_features)
product_of_gradient_and_features = tf.reduce_sum(product_of_gradient_and_features)
gradient_op = tf.gradients(product_of_gradient_and_features, model.input)
K.set_learning_phase(0)
gradient = self.session.run(gradient_op, feed_dict={model.input:pic, gradient_wrt_features:grads})
K.clear_session()
return gradient
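# Illustrative glue between ImageLoader and the classifiers trained above: load one
# validation image, push it through the frozen feature extractor, and score it with the
# trained sigmoid readout via make_prediction (defined below). This is only a sketch and
# is never called here; sess, X and y are assumed to be the session and placeholders
# returned by one of the training functions above, and idx=0 is an arbitrary example.
def _example_score_validation_image(sess, X, y, idx=0):
    il = ImageLoader(model_name='Inception')
    pic, feature = il.load_candidate_image(idx, img_folder='validation',
                                           dataset='flowers', model='Inception')
    prob, label = make_prediction(sess, X, y, feature)
    return pic, prob, label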
def propagate_gradients_to_input(grads, pic, model='Inception'):
from keras import backend as K
#set learning phase
#tf.reset_default_graph()
sess2 = tf.Session()
K.set_session(sess2)
#K.set_learning_phase(0)
pic = pic.reshape(1, 227, 227, 3)
grads = grads.reshape(2048)
gradient_wrt_features = tf.placeholder(tf.float32, shape=[2048])
base_model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet')
out = base_model.output
out = GlobalAveragePooling2D()(out)
model = Model(inputs=base_model.input, outputs=out)
flattened_features = tf.reshape(model.output, [-1])
product_of_gradient_and_features = tf.multiply(gradient_wrt_features, flattened_features)
product_of_gradient_and_features = tf.reduce_sum(product_of_gradient_and_features)
gradient_op = tf.gradients(product_of_gradient_and_features, model.input)
K.set_learning_phase(0)
gradient = sess2.run(gradient_op, feed_dict={model.input:pic, gradient_wrt_features:grads})
K.clear_session()
del model
return gradient
def make_prediction(sess, X, y, features):
prob = sess.run(y, feed_dict={X:features})
if (prob<0.5):
return prob, 0
else:
return prob, 1
def get_nearest_neighbors(X_train, X_test, N):
#X_test = X_test.flatten()
dist_2 = np.sum((X_train - X_test)**2, axis=1)
    idxs_most = np.argpartition(dist_2, -N, axis=0)
import os
import numpy as np
import random
import torch
import torch.utils.data as dataf
import torch.nn as nn
import matplotlib.pyplot as plt
from scipy import io
from sklearn.decomposition import PCA
# setting parameters
DataPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/Houston.mat'
TRPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TRLabel.mat'
TSPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TSLabel.mat'
savepath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/W3-DLSection/HU2013/2DCNN-14.mat'
patchsize = 16 # input spatial size for 2D-CNN
batchsize = 128 # select from [16, 32, 64, 128], the best is 64
EPOCH = 200
LR = 0.001
# load data
Data = io.loadmat(DataPath)
TrLabel = io.loadmat(TRPath)
TsLabel = io.loadmat(TSPath)
Data = Data['Houston']
Data = Data.astype(np.float32)
TrLabel = TrLabel['TRLabel']
TsLabel = TsLabel['TSLabel']
# without dimensionality reduction
pad_width = np.floor(patchsize/2)
pad_width = int(pad_width)  # np.int was removed from recent NumPy; use the built-in int
# normalization method 2: map to zero mean and one std
[m, n, l] = np.shape(Data)
# x2 = np.empty((m+pad_width*2, n+pad_width*2, l), dtype='float32')
for i in range(l):
mean = np.mean(Data[:, :, i])
std = np.std(Data[:, :, i])
Data[:, :, i] = (Data[:, :, i] - mean)/std
# x2[:, :, i] = np.pad(Data[:, :, i], pad_width, 'symmetric')
# # extract the first principal component
# x = np.reshape(Data, (m*n, l))
# pca = PCA(n_components=0.995, copy=True, whiten=False)
# x = pca.fit_transform(x)
# _, l = x.shape
# x = np.reshape(x, (m, n, l))
# # print x.shape
# # plt.figure()
# # plt.imshow(x)
# # plt.show()
x = Data
# boundary interpolation
temp = x[:,:,0]
pad_width = np.floor(patchsize/2)
pad_width = int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
[m2,n2] = temp2.shape
x2 = np.empty((m2, n2, l), dtype='float32')
for i in range(l):
temp = x[:,:,i]
    pad_width = np.floor(patchsize/2)
    pad_width = int(pad_width)
temp2 = np.pad(temp, pad_width, 'symmetric')
x2[:,:,i] = temp2
# construct the training and testing set
[ind1, ind2] = np.where(TrLabel != 0)
TrainNum = len(ind1)
TrainPatch = np.empty((TrainNum, l, patchsize, patchsize), dtype='float32')
TrainLabel = np.empty(TrainNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
# patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width + 1), (ind4[i] - pad_width):(ind4[i] + pad_width + 1), :]
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TrainPatch[i, :, :, :] = patch
patchlabel = TrLabel[ind1[i], ind2[i]]
TrainLabel[i] = patchlabel
[ind1, ind2] = np.where(TsLabel != 0)
TestNum = len(ind1)
TestPatch = np.empty((TestNum, l, patchsize, patchsize), dtype='float32')
TestLabel = np.empty(TestNum)
ind3 = ind1 + pad_width
ind4 = ind2 + pad_width
for i in range(len(ind1)):
patch = x2[(ind3[i] - pad_width):(ind3[i] + pad_width), (ind4[i] - pad_width):(ind4[i] + pad_width), :]
patch = np.reshape(patch, (patchsize * patchsize, l))
patch = np.transpose(patch)
patch = np.reshape(patch, (l, patchsize, patchsize))
TestPatch[i, :, :, :] = patch
patchlabel = TsLabel[ind1[i], ind2[i]]
TestLabel[i] = patchlabel
print('Training size and testing size are:', TrainPatch.shape, 'and', TestPatch.shape)
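# For reference, the per-pixel patch extraction performed in the two loops above can be
# expressed as a standalone helper. This is only an illustrative sketch (the function is
# not called by this script) and assumes the same channel-first (l, patchsize, patchsize)
# layout used for TrainPatch/TestPatch.
def extract_patch(padded_cube, row, col, pad_width, patchsize, n_bands):
    # padded_cube is the symmetrically padded image x2; (row, col) index the original image
    r, c = row + pad_width, col + pad_width
    patch = padded_cube[(r - pad_width):(r + pad_width), (c - pad_width):(c + pad_width), :]
    patch = np.reshape(patch, (patchsize * patchsize, n_bands))
    patch = np.transpose(patch)  # bands first
    return np.reshape(patch, (n_bands, patchsize, patchsize))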
# step3: change data to the input type of PyTorch
TrainPatch = torch.from_numpy(TrainPatch)
TrainLabel = torch.from_numpy(TrainLabel)-1
TrainLabel = TrainLabel.long()
dataset = dataf.TensorDataset(TrainPatch, TrainLabel)
train_loader = dataf.DataLoader(dataset, batch_size=batchsize, shuffle=True)
TestPatch = torch.from_numpy(TestPatch)
TestLabel = torch.from_numpy(TestLabel)-1
TestLabel = TestLabel.long()
Classes = len(np.unique(TrainLabel))
OutChannel = 32
# construct the network
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels = l,
out_channels = OutChannel,
kernel_size = 3,
stride = 1,
padding = 1,
),
nn.BatchNorm2d(OutChannel),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(OutChannel, OutChannel*2, 3, 1, 1),
nn.BatchNorm2d(OutChannel*2),
nn.ReLU(),
nn.MaxPool2d(2),
# nn.Dropout(0.5),
)
self.conv3 = nn.Sequential(
nn.Conv2d(OutChannel*2, OutChannel*4, 3, 1, 1),
nn.BatchNorm2d(OutChannel*4),
nn.ReLU(),
nn.AdaptiveMaxPool2d(1),
# nn.Dropout(0.5),
)
        self.out = nn.Linear(OutChannel*4, Classes)   # fully connected layer, outputs one score per class
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
        x = x.view(x.size(0), -1)           # flatten the pooled conv3 output to (batch_size, OutChannel*4)
output = self.out(x)
return output
cnn = CNN()
print('The structure of the designed network', cnn)
# move model to GPU
cnn.cuda()
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss() # the target label is not one-hotted
BestAcc = 0
# train and test the designed model
for epoch in range(EPOCH):
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader
# move train data to GPU
b_x = b_x.cuda()
b_y = b_y.cuda()
output = cnn(b_x) # cnn output
loss = loss_func(output, b_y) # cross entropy loss
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
if step % 50 == 0:
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel) // 5000
for i in range(number):
temp = TestPatch[i * 5000:(i + 1) * 5000, :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i * 5000:(i + 1) * 5000] = temp3.cpu()
del temp, temp2, temp3
if (i + 1) * 5000 < len(TestLabel):
temp = TestPatch[(i + 1) * 5000:len(TestLabel), :, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i + 1) * 5000:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
accuracy = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
# test_output = rnn(TestData)
# pred_y = torch.max(test_output, 1)[1].cuda().data.squeeze()
# accuracy = torch.sum(pred_y == TestDataLabel).type(torch.FloatTensor) / TestDataLabel.size(0)
print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(), '| test accuracy: %.2f' % accuracy)
# save the parameters in network
if accuracy > BestAcc:
torch.save(cnn.state_dict(), 'W3-DLSection/HU2013/net_params_2DCNN.pkl')
BestAcc = accuracy
cnn.train()
# # test each class accuracy
# # divide test set into many subsets
cnn.load_state_dict(torch.load('W3-DLSection/HU2013/net_params_2DCNN.pkl'))
cnn.eval()
pred_y = np.empty((len(TestLabel)), dtype='float32')
number = len(TestLabel)//5000
for i in range(number):
temp = TestPatch[i*5000:(i+1)*5000, :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[i*5000:(i+1)*5000] = temp3.cpu()
del temp, temp2, temp3
if (i+1)*5000 < len(TestLabel):
temp = TestPatch[(i+1)*5000:len(TestLabel), :, :]
temp = temp.cuda()
temp2 = cnn(temp)
temp3 = torch.max(temp2, 1)[1].squeeze()
pred_y[(i+1)*5000:len(TestLabel)] = temp3.cpu()
del temp, temp2, temp3
pred_y = torch.from_numpy(pred_y).long()
OA = torch.sum(pred_y == TestLabel).type(torch.FloatTensor) / TestLabel.size(0)
Classes = np.unique(TestLabel)
EachAcc = np.empty(len(Classes))
for i in range(len(Classes)):
cla = Classes[i]
    right = 0
    total = 0
    for j in range(len(TestLabel)):
        if TestLabel[j] == cla:
            total += 1
        if TestLabel[j] == cla and pred_y[j] == cla:
            right += 1
    EachAcc[i] = right / total  # true division; avoids shadowing the built-in sum()
print(OA)
print(EachAcc)
del TestPatch, TrainPatch, TrainLabel, b_x, b_y, dataset, train_loader
# show the whole image
# The whole dataset is too big to test in one pass, so divide it into several parts
part = 5000
pred_all = np.empty((m*n, 1), dtype='float32')
number = m*n//part
for i in range(number):
D = np.empty((part, l, patchsize, patchsize), dtype='float32')
count = 0
for j in range(i*part, (i+1)*part):
row = j//n
col = j - row*n
row2 = row + pad_width
col2 = col + pad_width
patch = x2[(row2 - pad_width):(row2 + pad_width), (col2 - pad_width):(col2 + pad_width), :]
        patch = np.reshape(patch, (patchsize * patchsize, l))
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # minutes spent polling so far
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to find the matching data files in the THREDDS catalog.
    :param data: JSON object returned from the M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the NetCDF files in the THREDDS catalog that match the tag
    """
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
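# A tiny, self-contained illustration of the regex-tag filtering that list_files()
# applies to a THREDDS catalog page. The inline HTML and the '\.nc$' tag are made-up
# example values; a real call passes the catalog URL returned for your data request.
def _demo_list_files_filtering():
    html = ('<a href="deployment0001_CE02SHSM-SBD11-06-METBKA000.nc">'
            'deployment0001_CE02SHSM-SBD11-06-METBKA000.nc</a>'
            '<a href="catalog.html">catalog.html</a>')
    soup = BeautifulSoup(html, 'html.parser')
    pattern = re.compile(r'.*\.nc$')
    # keeps only anchors whose link text matches the tag, exactly as list_files() does
    return [node.get('href') for node in soup.find_all('a', text=pattern)]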
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
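# The time conversion used in M2M_Data above: OOI timestamps arrive as seconds since
# 1900-01-01, which are converted to days and handed to pandas with a 1900 origin.
# Minimal self-contained check (the sample value below is arbitrary):
def _demo_time_conversion():
    seconds_since_1900 = np.array([3.7e9])  # arbitrary example value
    days = seconds_since_1900 / 60 / 60 / 24
    return pd.to_datetime(days, unit='D', origin=pd.Timestamp('1900-01-01'))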
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
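# End-to-end usage sketch tying the helpers above together: build a minimal var_list,
# submit an M2M request, locate the resulting NetCDF files on THREDDS, and read them
# into the var objects. The sensor path, date range and file tag are example values
# only, and the function is never called here (a real run needs valid credentials,
# network access, and may wait many minutes while OOINet prepares the data).
def _example_m2m_workflow():
    uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
    start_date = '2019-01-01T00:00:00.000Z'
    end_date = '2019-01-31T23:59:59.999Z'
    var_list = structtype()
    var_list[0].name = 'time'
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].name = 'sea_surface_temperature'
    var_list[1].units = 'degC'
    data = M2M_Call(uframe_dataset_name, start_date, end_date)  # submit request and poll until ready
    files = M2M_Files(data, r'.*METBK.*\.nc$')                  # NetCDF files for this request
    variables, times = M2M_Data(files, var_list)                # fill the var objects from THREDDS
    return variables, times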
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC - bio-acoustic sonar (zooplankton/backscatter)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS - surface wave spectra (bulk wave statistics)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT - single-point velocity meter
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W - pCO2 in seawater
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D single-point velocity meter (seafloor nodes)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK - 3-D single-point velocity meter on the wire-following profiler
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A - air-sea pCO2 (surface buoy)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD - photosynthetically available radiation
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
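# OPTAA: absorption/attenuation spectrophotometer. Only the time coordinate is mapped
# here; the spectral channels are not carried in var_list for these streams.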
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
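# NUTNR: SUNA nitrate sensor; branches map the raw and salinity-corrected nitrate
# concentrations in umol/L.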
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## RecoveredHost streams below: data recovered from the platform data loggers after mooring recovery
#MOPAK
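# MOPAK: buoy-mounted 3-axis motion package; only the time coordinate is mapped for
# these recovered-host streams.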
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
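# METBK: bulk meteorology package. Branches map sea-surface temperature/conductivity/
# salinity, corrected wind components, barometric pressure, air temperature, humidity,
# long/shortwave irradiance, precipitation, minute-averaged flux products, near-surface
# currents, and specific humidity.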
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
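# FLORT: fluorometer/backscatter sensor; chlorophyll-a, CDOM, volume scattering, and the
# derived optical backscatter and seawater scattering coefficients.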
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
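# FDCHP: direct-covariance flux package on the CE02SHSM buoy; only the time coordinate
# is mapped here.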
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
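# DOSTA: dissolved oxygen optode. NSIF branches map the corrected dissolved_oxygen
# product (umol/kg) plus the raw optode concentration and temperature; MFN branches map
# dissolved_oxygen and the optode oxygen from the CTDBP-hosted sensor.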
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
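# ADCP: acoustic Doppler current profiler; earth-coordinate velocity profiles with bin
# depths and instrument attitude (heading, pitch, roll).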
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
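# WAVSS: buoy-mounted surface wave sensor; bulk wave statistics including heights,
# periods, mean direction, and directional spread.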
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
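# VELPT: single-point velocity meter; velocity components plus attitude, temperature,
# and pressure reported in raw instrument units (deci-degrees, 0.01 degC, 0.001 dbar).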
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
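# Seawater pCO2 branches (RecoveredHost): each branch maps a platform/node to its
# pco2w_abc_dcl_instrument_recovered stream and requests time, the thermistor
# temperature (degC) and pco2_seawater (uatm).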
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
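# Seawater pH branches (RecoveredHost): time, phsen_thermistor_temperature (degC)
# and phsen_abcdef_ph_seawater (unitless) from the
# phsen_abcdef_dcl_instrument_recovered streams.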
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
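# Downwelling spectral irradiance branches (RecoveredHost): time and
# spkir_abj_cspp_downwelling_vector (uW cm-2 nm-1) from the
# spkir_abj_dcl_instrument_recovered streams.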
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
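# Seafloor pressure branches (RecoveredHost, MFN nodes only): time,
# abs_seafloor_pressure (dbar) and seawater_temperature (degC) from the
# presf_abc_dcl_tide_measurement_recovered streams.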
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
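# CTD branches (RecoveredHost): time, temp (degC), practical_salinity (unitless),
# density (kg/m3), pressure (dbar) and conductivity (S/m) from the
# ctdbp_cdef_dcl_instrument_recovered streams.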
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
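# 3-D point velocity branches (RecoveredHost, MFN nodes only): eastward, northward
# and upward turbulent velocity (m/s) plus seawater pressure from the
# vel3d_cd_dcl_velocity_data_recovered streams.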
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
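# Air-sea pCO2 branches (RecoveredHost, surface buoys): seawater and atmospheric
# pCO2 (uatm) and the derived CO2 flux (mol m-2 s-1) from the
# pco2a_a_dcl_instrument_water_recovered streams.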
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
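# Optical absorption/attenuation branches (RecoveredHost): only the time record is
# set up for the optaa_dj_dcl_instrument_recovered streams.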
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
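# Nitrate (SUNA) branches (RecoveredHost): time, nitrate_concentration and
# salinity_corrected_nitrate (both umol/L) from the suna_dcl_recovered streams.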
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
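#CTDBP
# CTD branches (RecoveredInst): same variables as the host-logged CTD branches
# above, but read from the recovered_inst ctdbp_cdef_instrument_recovered streams,
# which use instrument-native parameter names (ctdbp_seawater_temperature, etc.).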
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
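# Wire-following profiler CTD (RecoveredWFP): the CE09OSPM profiler uses the
# ctdpf_ckl_wfp_instrument_recovered stream with ctdpf_ckl_* parameter names.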
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
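#ADCP
# ADCP branches (RecoveredInst): time, bin_depths (meters), heading/pitch/roll
# (deci-degrees) and eastward/northward/upward seawater velocity (m/s) from the
# adcp_velocity_earth streams.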
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
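#ZPLSC
# ZPLSC echogram branches (RecoveredInst, MFN nodes only): only the time record is
# set up for the zplsc_echogram_data streams.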
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
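#VELPT single-point velocity data below (BUOY and NSIF nodes, recovered instrument)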
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
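#VEL3D 3-D velocity data below (profiler and MFN nodes, recovered)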
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
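#PRESF seafloor tide pressure data below (MFN nodes, recovered instrument)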
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
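#PHSEN seawater pH data below (NSIF and MFN nodes, recovered instrument)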
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
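#PCO2W seawater pCO2 data below (NSIF and MFN nodes, recovered instrument)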
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
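#PARAD PAR data below (CE09OSPM profiler, recovered WFP)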
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
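#NUTNR nitrate data below (NSIF nodes, recovered instrument)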
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
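#FDCHP data below (CE02SHSM buoy, recovered instrument)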
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
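#FLORT fluorometer and optical backscatter data below (recovered)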
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
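#DOSTA dissolved oxygen data below (recovered)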
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
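#Wave statistics from the ADCPT data below (MFN nodes, recovered instrument)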
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
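#Cabled BEP streamed data below (CE02SHBP and CE04OSBP)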
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
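# CE01ISSP / CE06ISSP profiler PAR (PARADJ000): telemetered and recovered_cspp streams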
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
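# CE01ISSP / CE06ISSP profiler nitrate (NUTNRJ000): recovered_cspp streams only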
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
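# CE01ISSP / CE06ISSP profiler spectral irradiance (SPKIRJ000): telemetered and recovered_cspp streams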
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
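# CE01ISSP / CE06ISSP profiler point velocity (VELPTJ000): telemetered and recovered_cspp streams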
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
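# CE01ISSP / CE06ISSP profiler OPTAA (OPTAAJ000): recovered_cspp streams only; only time and pressure are requested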
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
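# CE02SHSP / CE07SHSP profilers: all branches below use recovered_cspp streams only
# FLORT (FLORTJ000) fluorometer / optical backscatter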
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
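# CE02SHSP / CE07SHSP profiler oxygen (DOSTAJ000)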
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
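# CE02SHSP / CE07SHSP profiler CTD (CTDPFJ000)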
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
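# CE02SHSP / CE07SHSP profiler PAR (PARADJ000)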
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
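# CE02SHSP / CE07SHSP profiler nitrate (NUTNRJ000)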
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
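# CE02SHSP / CE07SHSP profiler spectral irradiance (SPKIRJ000)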
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
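# CE02SHSP / CE07SHSP profiler point velocity (VELPTJ000)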
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
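# CE02SHSP / CE07SHSP profiler OPTAA (OPTAAJ000): only time and pressure are requested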
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
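# CE05MOAS glider CTD (CTDGVM000): telemetered and recovered_host streams for gliders
# GL386, GL384, GL383, GL382, GL381, GL327, GL326, GL320, GL319, GL312, GL311, GL247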
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
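# CE05MOAS glider dissolved oxygen (DOSTAM000): telemetered and recovered_host streams per glider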
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
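# --- FLORT fluorometer glider streams ---
# CE05MOAS glider FLORT branches: seawater scattering coefficient and optical
# backscatter (m-1), chlorophyll (ug/L), CDOM (ppb), volume scattering
# (m-1 sr-1), plus CTD pressure and position, for telemetered and
# recovered_host delivery methods.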
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
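# --- PARAD photosynthetically active radiation (PAR) glider streams ---
# CE05MOAS glider PARAD branches: parad_m_par (umol photons m-2 s-1) with
# CTD pressure and position, for telemetered and recovered_host methods.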
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
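# --- ADCP velocity glider streams (recovered_host only) ---
# CE05MOAS glider ADCP branches: bin depths, attitude (heading/pitch/roll in
# deci-degrees), eastward/northward/upward seawater velocities (m/s), plus
# CTD pressure and position.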
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
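# Hourly bulk meteorology (METBK1-hr) from the surface mooring buoys: the
# branches below map CE02SHSM, CE07SHSM, CE04OSSM and CE09OSSM (telemetered and
# recovered_host) to the 06-METBKA000 'metbk_hourly' stream and pre-allocate the
# 17 hourly flux and near-surface state variables (rain rate, heat/moisture/
# momentum flux terms, specific humidity, stability, air and skin temperature,
# 10 m wind speed, net shortwave irradiance).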
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
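# Surface wave spectra, mean directional (WAVSS_MeanDir): branches for the
# 05-WAVSSA000 'wavss_a_dcl_mean_directional' streams (telemetered and
# recovered_host) on each surface mooring, with 12 variables covering the band
# layout, the mean directional PSD, and the corrected mean/directional wave
# directions.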
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
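# Surface wave spectra, non-directional (WAVSS_NonDir): the
# 'wavss_a_dcl_non_directional' streams carry a smaller set of 6 variables
# (band count, initial frequency, frequency spacing, the non-directional PSD,
# and the derived frequency axis).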
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
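# Buoy motion time series (WAVSS_Motion): the 'wavss_a_dcl_motion' streams
# return heave/north/east displacement arrays plus the magnetically corrected
# ('magcor') x/y buoy motion and a dedicated buoy-motion time axis.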
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
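# Directional Fourier coefficients (WAVSS_Fourier): the 'wavss_a_dcl_fourier'
# streams return the band layout for both the non-directional and directional
# spectra plus the 2-D Fourier coefficient array.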
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
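#CE04OSPS and CE04OSPD cabled profiler data streams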
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
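#dissolved oxygen for this profiler is served from the co-located CTD/SBE43 stream (same stream as the CTD branch above)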
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
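#CE04OSPS 200m platform (PC01B) data streams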
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
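#METBK1 maps to the SBD11 instrument and METBK2 to the SBD12 instrument on the buoy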
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (telemetered)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (recovered_host)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
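#METBK hourly (metbk_hourly) derived flux data streams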
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
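#METBK2-hr - hourly bulk meteorology flux products from the second METBK (metbk_hourly)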
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
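#CTD - temperature, salinity, density, pressure, and conductivity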
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
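#OPTAA - time records only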
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
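#VELPT - point velocities plus heading, pitch, roll, temperature, and pressure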
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
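#FLORT - chlorophyll-a, CDOM, scattering, and optical backscatter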
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
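#SPKIR - downwelling spectral irradiance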
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
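#DOSTA - dissolved oxygen and optode temperature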
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
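#PHSEN - seawater pH and thermistor temperature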
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
        var_list[2].data = np.array([])
import scipy.io
import numpy as np
import argparse
from scipy.stats import mode
from tqdm import tqdm
parser = argparse.ArgumentParser(description="hsi few-shot classification")
parser.add_argument("--data_folder", type=str, default='../data/')
parser.add_argument("--data_name", type=str, default='sar_train')
parser.add_argument("--labels_name", type=str, default='train_label')
args = parser.parse_args()
data_load = scipy.io.loadmat(args.data_folder + str(args.data_name) + '.mat')
label_load = scipy.io.loadmat(args.data_folder + str(args.labels_name) + '.mat')
data_key = list(data_load.keys())
label_key = list(label_load.keys())
print(data_key)
print("*"*100)
print(label_key)
input()
feature_1 = data_load['sar_1']; labels_1 = label_load['label_1']
feature_2 = data_load['sar_2']; labels_2 = label_load['label_2']
feature_3 = data_load['sar_3']; labels_3 = label_load['label_3']
print(feature_1.shape)
print(labels_1.shape)
old_feature = np.asarray([feature_1, feature_2, feature_3], dtype=float) # (3,1200,900)
old_labels = np.asarray([labels_1, labels_2, labels_3]) # (3,1200,900)
# scipy.io.savemat('./data/sar_train_nopool.mat', mdict={"feature": old_feature,"labels": old_labels})
pool_size = 2
h_size = feature_1.shape[0] // pool_size
v_size = feature_1.shape[1] // pool_size
new_feature = np.empty((old_feature.shape[0], h_size, v_size)) # (3, 600, 450) for 1200x900 inputs with pool_size=2
new_labels = np.empty((old_feature.shape[0], h_size, v_size)) # (3, 600, 450)
print(new_feature.shape, new_labels.shape)
for i in range(new_feature.shape[0]):
for j in tqdm(range(h_size)):
for k in range(v_size):
            new_feature[i][j, k] = np.mean(old_feature[i][j*pool_size:(j+1)*pool_size, k*pool_size:(k+1)*pool_size])
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
# Project: dspus
# injections.py
# Created by @wenchieh on <1/12/2020>
__author__ = 'wenchieh'
# sys
from numpy import random  # note: the random()/randint() calls below follow NumPy's signatures (single upper bound)
# third-part libs
import numpy as np
from scipy import linalg
from scipy.sparse import *
# parameters in injection -
# spike(M, N, Dspike, C),
# gap(M, N, D0, Dgap, C)
def injectSpike(Nall, M, N, Dspike, C):
Nstart, i = Nall, Nall
injectEs = list()
injectUs, injectVs = range(Nall, Nall + M, 1), range(Nall, Nall + N, 1)
for m in range(M):
# standard normal distribution
v1, v2, w = 0.0, 0.0, 2.0
while w > 1.0:
v1 = random.random() * 2.0 - 1.0
v2 = random.random() * 2.0 - 1.0
w = v1 * v1 + v2 * v2
outd = int(Dspike + v1 * np.sqrt(-2.0 * np.log(w) / w))
if outd < 0: outd = Dspike
outdC = int(outd * C)
outdN = outd - outdC
Ns, Cs = set(), set()
for d in range(outdN):
Ns.add(Nstart + M + random.randint(N))
for d in range(outdC):
Cs.add(random.randint(Nall))
for j in Ns:
injectEs.append([i, j])
for j in Cs:
injectEs.append([i, j])
i += 1
return len(injectEs), injectEs, injectUs, injectVs
def injectGap(Nall, M, N, D0, Dgap, C):
injectEs = list()
injectUs, injectVs = range(Nall, Nall + M, 1), range(Nall, Nall + N, 1)
Nstart, i = Nall, Nall
Md = int(1.0 * M / (Dgap - D0 + 1))
for outd in range(D0, Dgap, 1):
for m in range(Md):
outdC = int(outd * C)
outdN = outd - outdC
Ns, Cs = set(), set()
for d in range(outdN):
Ns.add(Nstart + M + random.randint(N))
for d in range(outdC):
Cs.add(random.randint(Nall))
for j in Ns:
injectEs.append([i, j])
for j in Cs:
injectEs.append([i, j])
i += 1
return len(injectEs), injectEs, injectUs, injectVs
def genEvenDenseBlock(A, B, p):
m = []
for i in range(A):
a = np.random.binomial(1, p, B)
m.append(a)
return np.array(m)
def genHyperbolaDenseBlock(A, B, alpha, tau):
    r'this is from the hyperbolic paper: i^\alpha * j^\alpha > \tau (applied to 1-based indices)'
m = np.empty([A, B], dtype=int)
for i in range(A):
for j in range(B):
if (i+1)**alpha * (j+1)**alpha > tau:
m[i,j] = 1
else:
m[i,j] = 0
return m
def genDiHyperRectBlocks(A1, B1, A2, B2, alpha=-0.5, tau=None, p=1):
if tau is None:
tau = A1**alpha * B1**alpha
m1 = genEvenDenseBlock(A1, B1, p=p)
m2 = genHyperbolaDenseBlock(A2, B2, alpha, tau)
M = linalg.block_diag(m1, m2)
return M
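# A minimal usage sketch (hypothetical sizes) for the block generators above: stack a
# 30x30 even dense block of density 0.9 diagonally with a 40x40 hyperbolic block in
# which entry (i, j) is set when (i+1)^alpha * (j+1)^alpha > tau, then sprinkle noise.
#   M_demo = genDiHyperRectBlocks(30, 30, 40, 40, alpha=-0.5, p=0.9)
#   M_demo = addnosie(M_demo, M_demo.shape[0], M_demo.shape[1], p=0.01)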
def addnosie(M, A, B, p, black=True, A0=0, B0=0):
v = 1 if black else 0
for i in range(A-A0):
a = np.random.binomial(1, p, B-B0)
for j in a.nonzero()[0]:
M[A0+i,B0+j]=v
return M
# inject a clique of size m0 by n0 with density p.
# the last parameter `testIdx` determines the camouflage type.
# testIdx = 1: random camouflage, with camouflage density set so each fraudster outputs an approximately equal number of fraudulent and camouflage edges
# testIdx = 2: random camouflage, with double the density of the previous setting
# testIdx = 3: biased camouflage, more likely to add camouflage to high degree column
#
# def injectCliqueCamo(M, m0, n0, p, testIdx):
# (m,n) = M.shape
# M2 = M.copy().tolil()
#
# colSum = np.squeeze(M2.sum(axis = 0).A)
# colSumPart = colSum[n0:n]
# colSumPartPro = np.int_(colSumPart)
# colIdx = np.arange(n0, n, 1)
# population = np.repeat(colIdx, colSumPartPro, axis = 0)
#
# for i in range(m0):
# # inject clique
# for j in range(n0):
# if random.random() < p:
# M2[i,j] = 1
# # inject camo
# if testIdx == 1:
# thres = p * n0 / (n - n0)
# for j in range(n0, n):
# if random.random() < thres:
# M2[i,j] = 1
# if testIdx == 2:
# thres = 2 * p * n0 / (n - n0)
# for j in range(n0, n):
# if random.random() < thres:
# M2[i,j] = 1
# # biased camo
# if testIdx == 3:
# colRplmt = random.sample(population, int(n0 * p))
# M2[i,colRplmt] = 1
#
# return M2.tocsc()
# inject a clique of size m0 by n0 with density p.
# the last parameter `testIdx` determines the camouflage type.
# testIdx = 1: random camouflage, with camouflage density set so each fraudster outputs an approximately equal number of fraudulent and camouflage edges
# testIdx = 2: random camouflage, with double the density of the previous setting
# testIdx = 3: biased camouflage, more likely to add camouflage to high degree column
def injectCliqueCamo(M, m0, n0, p, testIdx):
(m, n) = M.shape
injectEs = list()
injectUs, injectVs = np.arange(m0), np.arange(n0)
if testIdx in [3, 4]: # popular biased camouflage
colSum = np.squeeze(M.sum(axis = 0).A)
colSumPart = colSum[n0:n]
colSumPartPro = np.int_(colSumPart)
colIdx = np.arange(n0, n, 1)
population = np.repeat(colIdx, colSumPartPro, axis = 0)
for i in range(m0):
# inject clique
for j in range(n0):
if np.random.random() < p:
injectEs.append([i,j])
if testIdx == 0:
continue
# inject random camo
if testIdx == 1:
thres = p * n0 / (n - n0)
for j in range(n0, n):
if np.random.random() < thres:
injectEs.append([i,j])
if testIdx == 2:
thres = 2 * p * n0 / (n - n0)
for j in range(n0, n):
                if np.random.random() < thres:
                    injectEs.append([i,j])
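# A minimal usage sketch (hypothetical matrix; assumes injectCliqueCamo returns the
# injected edges analogously to injectSpike/injectGap above): inject a 50x50 clique of
# density 0.6 with random camouflage (testIdx=1) into a sparse user-by-object matrix.
#   M_demo = csr_matrix(np.random.binomial(1, 0.001, size=(5000, 5000)))
#   injected = injectCliqueCamo(M_demo, 50, 50, 0.6, 1)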
'''
dyn_prog.py
implements a scheme similar to solving valuation of american options for the life cycle model
this is a kind of dynamic programming scheme
'''
import math
import gym
from gym import spaces, logger, utils, error
from gym.utils import seeding
import numpy as np
from fin_benefits import Benefits
import gym_unemployment
import matplotlib.pyplot as plt
import h5py
from tqdm import tqdm_notebook as tqdm
from lifecycle_rl import Lifecycle
from scipy.interpolate import interpn,interp1d,interp2d,RectBivariateSpline,PchipInterpolator
from scipy.integrate import quad,trapz
from scipy.special import softmax
import time
from scipy.stats import gaussian_kde
class DynProgLifecycleRev(Lifecycle):
def __init__(self,minimal=True,env=None,timestep=1.0,ansiopvraha_kesto300=None,min_retirementage=None,
ansiopvraha_kesto400=None,karenssi_kesto=None,osittainen_perustulo=None,
ansiopvraha_toe=None,plotdebug=False,mortality=None,max_wage_old=None,max_empwage_old=None,
gamma=None,n_palkka=None,n_palkka_old=None,n_elake=None,n_tis=None,n_palkka_future=None,
n_emppalkka=None,n_emppalkka_old=None,
max_pension=None,min_wage=None,max_wage=None,max_empwage=None,perustulo=None,perustulomalli=None,
perustulo_korvaa_toimeentulotuen=None,startage=None):
super().__init__(minimal=minimal,env=env,timestep=timestep,ansiopvraha_kesto300=ansiopvraha_kesto300,
ansiopvraha_kesto400=ansiopvraha_kesto400,karenssi_kesto=karenssi_kesto,min_retirementage=min_retirementage,
ansiopvraha_toe=ansiopvraha_toe,mortality=mortality,plotdebug=plotdebug,
gamma=gamma,perustulo=perustulo,perustulomalli=perustulomalli,osittainen_perustulo=osittainen_perustulo,
perustulo_korvaa_toimeentulotuen=perustulo_korvaa_toimeentulotuen,startage=startage)
'''
        Initialize the variables
'''
#self.min_salary=1000
#self.hila_palkka0 = 1000 # = self.min_salary # 0
self.spline=True
self.extrapolate=True
self.pw_bivariate=True
if True:
self.spline_approx='cubic' #'cubic'
self.pw_maxspline=3 # 1 = linear, 3 = cubic
else:
self.spline_approx='linear'
self.pw_maxspline=1 # 1 = linear, 3 = cubic
        self.minspline=True # impose a lower bound on the splines
        self.minbispline=True # impose a lower bound on the splines
self.monotone_spline=False #True
self.oamonotone_spline=False
#self.spline_approx='quadratic'
#self.spline_approx='linear'
self.integmethod=0 # compare
self.n_employment=3
self.epsabs=1e-6
        # dynamic programming parameters
self.n_palkka = 20
self.n_palkka_old = 10
self.n_emppalkka = 20
self.n_emppalkka_old = 10
self.n_palkka_future = 21
self.n_palkka_future_tr = 201
self.n_elake = 40
self.n_oapension = 1000
        self.n_tis = 2 # has no effect on the wage
self.min_wage=1_000
self.max_wage=85_000
self.max_wage_old=self.max_wage
self.max_empwage=self.max_wage
self.max_empwage_old=self.max_wage_old
self.min_pension=0 # for active
self.max_pension=50_000 # for active
self.min_oapension=500*12 # for retired
self.max_oapension=80_000 # for retired
#self.factor_future_wage=8
if n_palkka is not None:
self.n_palkka=n_palkka
if n_palkka_old is not None:
self.n_palkka_old=n_palkka_old
if n_emppalkka is not None:
self.n_emppalkka=n_emppalkka
if n_emppalkka_old is not None:
self.n_emppalkka_old=n_emppalkka_old
if n_palkka_future is not None:
self.n_palkka_future=n_palkka_future
if n_elake is not None:
self.n_elake=n_elake
if n_tis is not None:
self.n_tis=n_tis
if max_wage is not None:
self.max_wage=max_wage
if max_wage_old is not None:
self.max_wage_old=max_wage_old
if max_empwage is not None:
self.max_empwage=max_empwage
if max_empwage_old is not None:
self.max_empwage_old=max_empwage_old
if min_wage is not None:
self.min_wage=min_wage
if max_pension is not None:
self.max_pension=max_pension
#if factor_future_wage is not None:
# self.factor_future_wage=factor_future_wage
self.deltapalkka = (self.max_wage-self.min_wage)/(self.n_palkka-1)
self.deltapalkka_old = (self.max_wage_old-self.min_wage)/(self.n_palkka_old-1)
self.deltaemppalkka = (self.max_empwage-self.min_wage)/(self.n_emppalkka-1)
self.deltaemppalkka_old = (self.max_empwage_old-self.min_wage)/(self.n_emppalkka_old-1)
self.deltaelake = (self.max_pension-self.min_pension)/(self.n_elake-1)
self.delta_oapension = (self.max_oapension-self.min_oapension)/(self.n_oapension-1)
self.deltatis = 1
self.include_pt=False
#self.midfuture=int(np.floor(self.n_palkka_future/2))
#self.deltafuture=self.factor_future_wage*0.07*0.5/self.midfuture
self.min_grid_age=self.min_age
self.max_grid_age=self.max_age
print('min',self.min_retirementage)
if False:
self.unemp_wageshock=1.0 #0.95
else:
self.unemp_wageshock=0.95
if self.spline:
self.get_V=self.get_V_spline
self.get_V_vector=self.get_V_vector_spline
self.get_actV=self.get_actV_spline
self.get_actReward=self.get_actReward_spline
self.get_act=self.get_act_spline
else:
self.get_V=self.get_V_nospline
self.get_V_vector=self.get_V_vector_nospline
self.get_actV=self.get_actV_nospline
self.get_actReward=self.get_actReward_nospline
self.get_act=self.get_act_nospline
def init_grid(self):
self.unempHila = np.zeros((self.n_time+1,self.n_palkka_old,self.n_elake,self.n_tis,self.n_palkka))
self.unempActHila = np.zeros((self.n_time+1,self.n_palkka_old,self.n_elake,self.n_tis,self.n_palkka,self.n_acts))
self.unempActReward = np.zeros((self.n_time+1,self.n_palkka_old,self.n_elake,self.n_tis,self.n_palkka,self.n_acts))
self.empHila = np.zeros((self.n_time+1,self.n_emppalkka_old,self.n_elake,self.n_emppalkka))
self.empActHila = np.zeros((self.n_time+1,self.n_emppalkka_old,self.n_elake,self.n_emppalkka,self.n_acts))
self.empActReward = np.zeros((self.n_time+1,self.n_emppalkka_old,self.n_elake,self.n_emppalkka,self.n_acts))
self.oaHila = np.zeros((self.n_time+1,self.n_oapension))
self.oaactHila = np.zeros((self.n_time+1,self.n_oapension))
self.oaactReward = np.zeros((self.n_time+1,self.n_oapension))
def explain(self):
print(f'n_palkka {self.n_palkka}\nn_palkka_old {self.n_palkka_old}\nn_elake {self.n_elake}\nn_palkka_future {self.n_palkka_future}')
print(f'n_emppalkka {self.n_emppalkka}\nn_emppalkka_old {self.n_emppalkka_old}\nn_elake {self.n_elake}')
print(f'n_oapension {self.n_oapension}')
print(f'min_wage {self.min_wage} max_wage {self.max_wage} max_wage_old {self.max_wage_old}')
print(f'min_empwage {self.min_wage} max_empwage {self.max_empwage} max_empwage_old {self.max_empwage_old}')
print(f'min_pension {self.min_pension} max_pension {self.max_pension}')
print(f'min_oapension {self.min_oapension} max_oapension {self.max_oapension}')
print(f'deltapalkka {self.deltapalkka} deltapalkka_old {self.deltapalkka_old} deltaelake {self.deltaelake}')
print(f'delta_oapension {self.delta_oapension}')
print(f'n_tis {self.n_tis} deltatis {self.deltatis}')
print(f'gamma {self.gamma} timestep {self.timestep}')
self.env.explain()
def map_elake(self,v,emp=1):
if emp==2:
            return self.min_oapension+self.delta_oapension*v # should this use an exponential grid instead?
else:
            return self.min_pension+self.deltaelake*v # should this use an exponential grid instead?
def inv_elake(self,v,emp=1):
if emp==2:
vmin=max(0,min(self.n_oapension-2,int(np.floor((v-self.min_oapension)/self.delta_oapension))))
vmax=vmin+1
            w=max(0,v-self.min_oapension)/self.delta_oapension-vmin # linear approximation
else:
vmin=max(0,min(self.n_elake-2,int(np.floor((v-self.min_pension)/self.deltaelake))))
vmax=vmin+1
            w=max(0,v-self.min_pension)/self.deltaelake-vmin # linear approximation
if w<0:
print(f'w<0: {w} {v} {vmin}')
w=0
#raise ValueError('A very specific bad thing happened.')
return vmin,vmax,w
# def map_exp_elake(self,v):
# return self.min_pension+self.deltaelake*(np.exp(v*self.expelakescale)-1)
#
# def inv_exp_elake(self,v):
# vmin=max(0,min(self.n_elake-2,int((np.log(v-self.min_pension)+1)/self.deltaelake)))
# vmax=vmin+1
# vmin_value=self.map_exp_elake(vmin)
# vmax_value=self.map_exp_elake(vmax)
# w=(v-vmin_value)/(self.vmax_value-vmin_value) # lin.approximaatio
#
# return vmin,vmax,w
def map_palkka_old(self,v,emp,midpoint=False):
if emp==0:
deltapalkka_old=self.deltapalkka_old
elif emp==1:
deltapalkka_old=self.deltaemppalkka_old
if midpoint:
return self.min_wage+max(0,deltapalkka_old*(v+0.5))
else:
return self.min_wage+max(0,deltapalkka_old*v)
def inv_palkka_old(self,v,emp):
if emp==0:
deltapalkka_old=self.deltapalkka_old
n_palkka_old=self.n_palkka_old
elif emp==1:
deltapalkka_old=self.deltaemppalkka_old
n_palkka_old=self.n_emppalkka_old
q=int(np.floor((v-self.min_wage)/deltapalkka_old))
vmin=int(max(0,min(n_palkka_old-2,q)))
vmax=vmin+1
        w=max(0,v-self.min_wage)/deltapalkka_old-vmin # linear approximation
if w<0:
print(f'w<0: {w} {v} {vmin}')
w=0
#raise ValueError('A very specific bad thing happened.')
return vmin,vmax,w
def map_palkka(self,v,emp,midpoint=False):
if emp==0:
deltapalkka=self.deltapalkka
elif emp==1:
deltapalkka=self.deltaemppalkka
if midpoint:
return self.min_wage+max(0,deltapalkka*(v+0.5))
else:
return self.min_wage+max(0,deltapalkka*v)
def inv_palkka(self,v,emp):
if emp==0:
deltapalkka=self.deltapalkka
n_palkka=self.n_palkka
elif emp==1:
deltapalkka=self.deltaemppalkka
n_palkka=self.n_emppalkka
q=int(np.floor((v-self.min_wage)/deltapalkka))
vmin=int(max(0,min(n_palkka-2,q)))
vmax=vmin+1
        w=max(0,v-self.min_wage)/deltapalkka-vmin # linear approximation
if w<0:
print(f'w<0: {w} {v} {vmin}')
w=0
#raise ValueError('A very specific bad thing happened.')
return vmin,vmax,w
def test_map_palkka_old(self):
'''
debug function
'''
for k in range(1000,100000,1000):
vmin,vmax,w=self.inv_elake(k,emp=2)
p2=(1-w)*self.map_elake(vmin,emp=2)+w*self.map_elake(vmax,emp=2)
print(k,p2,vmin,vmax,w)
for p in range(self.n_palkka):
palkka=self.map_palkka(p)
print(palkka)
def test_map_palkka(self,emp=1):
print('palkka')
wage=np.linspace(self.min_wage, self.max_wage, self.n_palkka)
wage_old=np.linspace(self.min_wage, self.max_wage_old, self.n_palkka_old)
pension=np.linspace(0, self.max_pension, self.n_elake)
for x in np.arange(self.min_wage-10_000,self.max_wage+10_000,1_000):
vmin,vmax,w=self.inv_palkka(x,emp)
p2=(1-w)*self.map_palkka(vmin,emp)+w*self.map_palkka(vmax,emp)
p3=(1-w)*wage[vmin]+w*wage[vmax]
print(x,p2,p3)
print('palkka_old')
for x in np.arange(self.min_wage-10_000,self.max_wage_old+10_000,1_000):
vmin,vmax,w=self.inv_palkka_old(x,emp)
p2=(1-w)*self.map_palkka_old(vmin,emp)+w*self.map_palkka_old(vmax,emp)
p3=(1-w)*wage_old[vmin]+w*wage_old[vmax]
print(x,p2,p3)
print('elake')
for x in np.arange(self.min_pension-10_000,self.max_pension+10_000,1_000):
vmin,vmax,w=self.inv_elake(x,emp=emp)
p2=(1-w)*self.map_elake(vmin,emp=emp)+w*self.map_elake(vmax,emp=emp)
p3=(1-w)*pension[vmin]+w*pension[vmax]
print(x,p2,p3)
# def map_palkka_future(self,palkka,v,med,state=1,midpoint=False):
# #if state==0:
# # kerroin=self.unemp_wageshock
# #else:
# # kerroin=1.0
# if midpoint:
# return med*(1+(v+0.5-self.midfuture)*self.deltafuture)
# else:
# return med*(1+(v-self.midfuture)*self.deltafuture)
def map_palkka_future_v4(self,palkka,age,state=1,midpoints=None):
midp=[1.0*(v+1)/self.n_palkka_future for v in range(self.n_palkka_future)]
midp_w=self.env.wage_process_map(midp,palkka,age,state=state)
rho=1.0/self.n_palkka_future
#print(midp_w,rho)
w=np.zeros(self.n_palkka_future)
for pnext in range(self.n_palkka_future-1):
w[pnext]=self.env.wage_process_expect(midp_w[pnext],midp_w[pnext+1],palkka,age,state=state)/rho
w[self.n_palkka_future-1]=self.env.wage_process_expect(midp_w[self.n_palkka_future-1],1e6,palkka,age,state=state)/rho
#w=self.env.wage_process_map(x,palkka,age,state=state)
return midp_w,w
def map_palkka_future_v2(self,palkka,age,state=1,midpoint=False):
def f(x):
if x>self.n_palkka_future/2:
return (0.49+x)/self.n_palkka_future
else:
return (0.51+x)/self.n_palkka_future
if midpoint:
x=[1.0*(v+1)/self.n_palkka_future for v in range(self.n_palkka_future)]
else:
            if state==1: # in the large limit it does not really matter which offset is added here, as long as x is in (0,1)
x=[f(v) for v in range(self.n_palkka_future)]
else:
#x=[1.0*(0.50+v)/self.n_palkka_future for v in range(self.n_palkka_future)]
x=[f(v) for v in range(self.n_palkka_future)]
w=self.env.wage_process_map(x,palkka,age,state=state)
return w
def map_palkka_future_v3(self,palkka,age,state=1,midpoint=False):
x=0.0001+np.array([0.9998*v/self.n_palkka_future for v in range(self.n_palkka_future+1)])
w=self.env.wage_process_map(x,palkka,age,state=state)
return w
def test_palkka_future(self):
for s in range(2):
for palkka in range(1000,50000,5000):
for v in range(self.n_palkka_future):
p=self.map_palkka_future(palkka,v,s)
qmin,qmax,ql=self.inv_palkka_future(palkka,p,s)
print(f'{palkka}: {p} {qmin} {qmax} {ql} {v}')
def map_tis(self,v):
return v
def inv_tis(self,v):
return int(min(self.n_tis-1,v))
    # linear approximation
def get_V_spline(self,s=None,emp=None,elake=None,old_wage=None,time_in_state=None,wage=None,show=False,age=None):
'''
        fetch the value of state s at time t from the grid
'''
if emp is None:
emp,elake,old_wage,age,time_in_state,wage=self.env.state_decode(s)
t = self.map_grid_age(age)
if age>self.max_age:
return 0.0
tismax=self.inv_tis(time_in_state)
if emp==2:
tismax=0
x = np.linspace(self.min_oapension,self.max_oapension,self.n_oapension)
y = self.oaHila[t,:]
if self.oamonotone_spline:
f = PchipInterpolator(x,y,extrapolate=True)
else:
f = interp1d(x,y,fill_value="extrapolate",kind=self.spline_approx)
if self.minspline:
                V1=max(y[0],f(max(self.min_oapension,elake))) # the max effectively enforces the guarantee pension
else:
                V1=max(0,f(max(self.min_oapension,elake))) # the max effectively enforces the guarantee pension
else:
emin,emax,we=self.inv_elake(elake,emp=emp)
pmin,pmax,wp=self.inv_palkka_old(old_wage,emp)
if self.pw_bivariate:
if emp==0:
values=(1-wp)*self.unempHila[t,pmin,:,tismax,:]+wp*self.unempHila[t,pmax,:,tismax,:]
elif emp==1:
values=(1-wp)*self.empHila[t,pmin,:,:]+wp*self.empHila[t,pmax,:,:]
else:
if emp==0:
values=(1-we)*self.unempHila[t,:,emin,tismax,:]+we*self.unempHila[t,:,emax,tismax,:]
elif emp==1:
values=(1-we)*self.empHila[t,:,emin,:]+we*self.empHila[t,:,emax,:]
V1=self.get_V_values(emp,elake,wage,old_wage,values)
#if show:
# print(f'getV({emp},{elake},{old_wage},{wage}): p2min {p2min} p2max {p2max} wp2 {wp2})')
# print(self.Hila[t,pmin,emin,emp,tismax,p2min],self.Hila[t,pmin,emin,emp,tismax,p2max])
V=np.maximum(0,V1)
return V
    # linear approximation
def get_V_vector_spline(self,s=None,emp=None,elake=None,old_wage=None,time_in_state=None,wages=None,show=False,age=None,debug=False):
'''
        fetch the value of state s at time t from the grid
'''
#if t>self.n_time:
# return 0
Vs=np.zeros(wages.shape)
if emp is None:
emp,elake,old_wage,age,time_in_state,wage=self.env.state_decode(s)
t=self.map_grid_age(age)
if emp==2:
tismax=0
x = np.linspace(self.min_oapension,self.max_oapension, self.n_oapension)
y = self.oaHila[t,:]
if self.oamonotone_spline:
f = PchipInterpolator(x,y,extrapolate=True)
else:
f = interp1d(x, y,fill_value="extrapolate",kind=self.spline_approx)
if self.minspline:
Vs[:]=np.maximum(y[0],f(elake))
else:
Vs[:]=np.maximum(0,f(elake))
else:
emin,emax,we=self.inv_elake(elake,emp=emp)
pmin,pmax,wp=self.inv_palkka_old(old_wage,emp)
tismax=self.inv_tis(time_in_state)
p = np.linspace(0, self.max_pension, self.n_elake)
if emp==0:
max_wage=self.max_wage
max_wage_old=self.max_wage_old
n_palkka=self.n_palkka
n_palkka_old=self.n_palkka_old
elif emp==1:
max_wage=self.max_empwage
max_wage_old=self.max_empwage_old
n_palkka=self.n_emppalkka
n_palkka_old=self.n_emppalkka_old
w = np.linspace(self.min_wage, max_wage, n_palkka)
w_old = np.linspace(self.min_wage, max_wage_old, n_palkka_old)
if self.pw_bivariate:
if emp==0:
values=(1-wp)*self.unempHila[t,pmin,:,tismax,:]+wp*self.unempHila[t,pmax,:,tismax,:]
elif emp==1:
values=(1-wp)*self.empHila[t,pmin,:,:]+wp*self.empHila[t,pmax,:,:]
else:
error(1)
g=RectBivariateSpline(p,w, values,kx=self.pw_maxspline,ky=self.pw_maxspline)
if self.monotone_spline:
g_extra = PchipInterpolator(w,g(elake,w)[0,:],extrapolate=True)
else:
g_extra = interp1d(w,g(elake,w),fill_value="extrapolate",kind='linear')
for ind,wage in enumerate(wages):
if elake>self.max_pension and self.extrapolate:
if wage>max_wage and self.extrapolate:
vh=np.zeros(n_palkka)
for k,wg in enumerate(w):
gw=interp1d(p,values[:,k],fill_value="extrapolate",kind='linear') #self.spline_approx)
vh[k]=gw(elake)
h=interp1d(w,vh,fill_value="extrapolate",kind='linear') #self.spline_approx)
if self.minbispline:
Vs[ind] = max(values[0,0],np.squeeze(h(wage)))
else:
Vs[ind] = max(0.0,np.squeeze(h(wage)))
else:
gq=interp1d(p,g(p,wage)[:,0],fill_value="extrapolate",kind='linear') #self.spline_approx)
if self.minbispline:
Vs[ind] = max(values[0,0],np.squeeze(gq(elake)))
else:
Vs[ind] = max(0.0,np.squeeze(gq(elake)))
else:
if wage>max_wage and self.extrapolate:
                            # what if old_wage > max_wage??
#g_extra=interp1d(w,g(elake,w),fill_value="extrapolate",kind='linear') #self.spline_approx)
V1 = np.squeeze(g_extra(wage))
else:
V1 = np.squeeze(g(elake,wage))
if self.minbispline:
Vs[ind]=max(values[0,0],V1)
else:
Vs[ind]=max(0,V1)
else:
if emp==0:
values=(1-we)*self.unempHila[t,:,emin,tismax,:]+we*self.unempHila[t,:,emax,tismax,:]
elif emp==1:
values=(1-we)*self.empHila[t,:,emin,:]+we*self.empHila[t,:,emax,:]
else:
error(1)
g=RectBivariateSpline(w_old,w,values,kx=self.pw_maxspline,ky=self.pw_maxspline)
if self.monotone_spline:
g_extra = PchipInterpolator(w,g(old_wage,w)[0,:],extrapolate=True)
else:
g_extra = interp1d(w,g(old_wage,w),fill_value="extrapolate",kind='linear')
num=0
for ind,wage in enumerate(wages):
if old_wage>max_wage_old and self.extrapolate:
if wage>max_wage:
num=num+1
#print(num)
vh=np.zeros(n_palkka)
for k,wg in enumerate(w):
gw=interp1d(w_old,values[:,k],fill_value="extrapolate",kind='linear') #self.spline_approx)
vh[k]=gw(old_wage)
h=interp1d(w,vh,fill_value="extrapolate",kind='linear') #self.spline_approx)
if self.minbispline:
Vs[ind] = max(values[0,0],np.squeeze(h(wage)))
else:
Vs[ind] = max(0.0,np.squeeze(h(wage)))
else:
if self.monotone_spline:
hwx = PchipInterpolator(w_old,g(w_old,wage)[:,0],extrapolate=True)
else:
hwx=interp1d(w_old,g(w_old,wage)[:,0],fill_value="extrapolate",kind='linear') #self.spline_approx)
if self.minbispline:
Vs[ind] = max(values[0,0],np.squeeze(hwx(old_wage)))
else:
Vs[ind] = max(0.0,np.squeeze(hwx(old_wage)))
else:
if wage>max_wage and self.extrapolate:
if self.minbispline:
Vs[ind] = max(values[0,0],np.squeeze(g_extra(wage)))
else:
Vs[ind] = max(0.0,np.squeeze(g_extra(wage)))
else:
V1 = np.squeeze(g(old_wage,wage))
if self.minbispline:
Vs[ind]=max(values[0,0],V1)
else:
Vs[ind]=max(0.0,V1)
#if debug:
# print(f'w {wage} V1 {V1}')
#if show:
# print(f'getV({emp},{elake},{old_wage},{wage}): p2min {p2min} p2max {p2max} wp2 {wp2})')
# print(self.Hila[t,pmin,emin,emp,tismax,p2min],self.Hila[t,pmin,emin,emp,tismax,p2max])
return Vs
def map_grid_age(self,age):
return int(np.round(age-self.min_grid_age))
def plot_Hila(self,age,emp=1,time_in_state=1,diff=False):
        x=np.arange(self.min_wage,100000,1000)
from __future__ import print_function
import unittest
from SimPEG import Mesh, Utils
import numpy as np
import SimPEG.EM.Static.DC as DC
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
from geoana.em import fdem
from scipy.constants import mu_0, epsilon_0
class DC_CC_DipoleFullspaceTests(unittest.TestCase):
def setUp(self):
cs = 0.5
npad = 11
hx = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
hy = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
hz = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
mesh = Mesh.TensorMesh([hx, hy, hz], x0="CCC")
sigma = np.ones(mesh.nC)*1e-2
# Set up survey parameters for numeric solution
x = mesh.vectorCCx[(mesh.vectorCCx > -75.) & (mesh.vectorCCx < 75.)]
y = mesh.vectorCCy[(mesh.vectorCCy > -75.) & (mesh.vectorCCy < 75.)]
Aloc = np.r_[1.0, 0., 0.]
Bloc = np.r_[-1.0, 0., 0.]
M = Utils.ndgrid(x-25., y, np.r_[0.])
N = Utils.ndgrid(x+25., y, np.r_[0.])
rx = DC.Rx.Dipole(M, N)
src = DC.Src.Dipole([rx], Aloc, Bloc)
survey = DC.Survey([src])
# Create Dipole Obj for Analytic Solution
edipole = fdem.ElectricDipoleWholeSpace(
            sigma=1e-2,  # conductivity of 0.01 S/m
mu=mu_0, # permeability of free space (this is the default)
epsilon=epsilon_0, # permittivity of free space (this is the default)
location=np.r_[0., 0., 0.], # location of the dipole
orientation='X', # horizontal dipole (can also be a unit-vector)
            quasistatic=True, # use the quasistatic assumption
frequency=0.0, # DC
length=2.0 # length of dipole
)
# evaluate the electric field and current density
Ex_analytic = np.zeros_like([mesh.nFx,1])
Ey_analytic = np.zeros_like([mesh.nFy,1])
Ez_analytic = np.zeros_like([mesh.nFz,1])
Ex_analytic = np.real(edipole.electric_field(mesh.gridFx))[:,0]
Ey_analytic = np.real(edipole.electric_field(mesh.gridFy))[:,1]
Ez_analytic = np.real(edipole.electric_field(mesh.gridFz))[:,2]
E_analytic = np.hstack([Ex_analytic,Ey_analytic,Ez_analytic])
Jx_analytic = np.zeros_like([mesh.nFx,1])
Jy_analytic = np.zeros_like([mesh.nFy,1])
Jz_analytic = np.zeros_like([mesh.nFz,1])
Jx_analytic = np.real(edipole.current_density(mesh.gridFx))[:,0]
Jy_analytic = np.real(edipole.current_density(mesh.gridFy))[:,1]
Jz_analytic = np.real(edipole.current_density(mesh.gridFz))[:,2]
J_analytic = np.hstack([Jx_analytic,Jy_analytic,Jz_analytic])
# Find faces at which to compare solutions
faceGrid = np.vstack([mesh.gridFx,mesh.gridFy,mesh.gridFz])
# print(faceGrid.shape)
ROI_large_BNW = np.array([-75,75,-75])
ROI_large_TSE = np.array([75,-75,75])
ROI_largeInds = Utils.ModelBuilder.getIndicesBlock(ROI_large_BNW,ROI_large_TSE,faceGrid)[0]
# print(ROI_largeInds.shape)
ROI_small_BNW = np.array([-4,4,-4])
ROI_small_TSE = np.array([4,-4,4])
ROI_smallInds = Utils.ModelBuilder.getIndicesBlock(ROI_small_BNW,ROI_small_TSE,faceGrid)[0]
# print(ROI_smallInds.shape)
ROIfaceInds = np.setdiff1d(ROI_largeInds,ROI_smallInds)
# print(ROIfaceInds.shape)
# print(len(ROI_largeInds) - len(ROI_smallInds))
self.survey = survey
self.mesh = mesh
self.sigma = sigma
self.E_analytic = E_analytic
self.J_analytic = J_analytic
self.ROIfaceInds = ROIfaceInds
def test_Problem3D_CC_Dirchlet(self, tolerance=0.1):
problem = DC.Problem3D_CC(
self.mesh, sigma=self.sigma, bc_type='Dirchlet'
)
problem.Solver = Solver
problem.pair(self.survey)
f = problem.fields()
eNumeric = Utils.mkvc(f[self.survey.srcList,'e'])
jNumeric = Utils.mkvc(f[self.survey.srcList,'j'])
errE = (
np.linalg.norm(jNumeric[self.ROIfaceInds] - self.J_analytic[self.ROIfaceInds]) /
            np.linalg.norm(self.J_analytic[self.ROIfaceInds])
        )
import numpy as np
import numpy.linalg as la
from io import StringIO
def print_mat(mat):
stream = StringIO()
np.savetxt(stream, mat, fmt="%.3f")
print( stream.getvalue() )
# -----------------------------------
def get_convergent_vector(L, r_0, threshold=0.01):
'''
:param L: transition matrix
:param r_0: initial vector
:param threshold: parameter for convergence condition
:return: convergent vector
'''
r_cur = r_0
while True:
r_next = np.matmul(L, r_cur)
if la.norm(r_next - r_cur) < threshold:
            # check whether the convergence condition is met
break
r_cur = r_next
return r_cur
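# Example (hypothetical 2-page link matrix): power-iterate L @ r until successive rank
# vectors differ by less than the threshold in Euclidean norm.
#   L_demo = np.array([[0.9, 0.5], [0.1, 0.5]])   # column-stochastic
#   r_demo = get_convergent_vector(L_demo, np.array([0.5, 0.5]), threshold=1e-6)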
# -----------------------------------
def get_matrix_with_damping(matrix, damping=0.5):
# get the size of matrix
n, _ = matrix.shape
return matrix * damping + ( 1 - damping ) * np.ones((n, n)) / n
# -----------------------------------
# get the size
n = int( input() )
# get the name of website
websites = input().split()
# get transition matrix
matrix = [ [ 0.0 for x in range(n)] for y in range(n) ]
for y in range(n):
matrix[y] = [ *map( float, input().split() ) ]
# get the name of target website
target = input()
# convert to numpy array
matrix = np.array(matrix)
from numba import njit
import numpy as np
from numpy import pi, inf, NINF, float64, finfo
from numpy.random import rand
import math
from math import isnan, cos, log, exp
import random
from tpg.utils import flip
import uuid
import copy
"""
A program that is executed to help obtain the bid for a learner.
"""
class ConfProgram:
def init_def(self, instructions=None, maxProgramLength=128, nOperations=5,
nDestinations=8, inputSize=30720, initParams=None):
if instructions is not None: # copy from existing
self.instructions = np.array(instructions, dtype=np.int32)
else: # create random new
self.instructions = np.array([
(random.randint(0,1),
random.randint(0, nOperations-1),
random.randint(0, nDestinations-1),
random.randint(0, inputSize-1))
for _ in range(random.randint(1, maxProgramLength))], dtype=np.int32)
self.id = uuid.uuid4()
"""
Executes the program which returns a single final value.
"""
@njit
def execute_def(inpt, regs, modes, ops, dsts, srcs):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*2
elif op == 3:
regs[dest] = x/2
elif op == 4:
if x < y:
regs[dest] = x*(-1)
if math.isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == np.inf:
regs[dest] = np.finfo(np.float64).max
elif regs[dest] == np.NINF:
regs[dest] = np.finfo(np.float64).min
"""
Executes the program which returns a single final value using shared memory.
"""
@njit
def execute_mem(inpt, regs, modes, ops, dsts, srcs,
memMatrix, memRows, memCols, memWriteProbFunc):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*2
elif op == 3:
regs[dest] = x/2
elif op == 4:
if x < y:
regs[dest] = x*(-1)
elif op == 5:
index = srcs[i]
index %= (memRows*memCols)
row = int(index / memRows)
col = index % memCols
regs[dest] = memMatrix[row, col]
elif op == 6:
# row offset (start from center, go to edges)
halfRows = int(memRows/2) # halfRows
for i in range(halfRows):
# probability to write (gets smaller as i increases)
# TODO: swap out write prob func by passing in an array of values for that row.
writeProb = memWriteProbFunc(i)
# column to maybe write corresponding value into
for col in range(memCols):
# try write to lower half
if rand(1)[0] < writeProb:
row = (halfRows - i) - 1
memMatrix[row,col] = regs[col]
# try write to upper half
if rand(1)[0] < writeProb:
row = halfRows + i
memMatrix[row,col] = regs[col]
if isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == inf:
regs[dest] = finfo(float64).max
elif regs[dest] == NINF:
regs[dest] = finfo(float64).min
"""
Executes the program which returns a single final value.
"""
@njit
def execute_full(inpt, regs, modes, ops, dsts, srcs):
regSize = len(regs)
inptLen = len(inpt)
for i in range(len(modes)):
# first get source
if modes[i] == 0:
src = regs[srcs[i]%regSize]
else:
src = inpt[srcs[i]%inptLen]
# get data for operation
op = ops[i]
x = regs[dsts[i]]
y = src
dest = dsts[i]%regSize
# do an operation
if op == 0:
regs[dest] = x+y
elif op == 1:
regs[dest] = x-y
elif op == 2:
regs[dest] = x*2
elif op == 3:
regs[dest] = x/2
elif op == 4:
if x < y:
regs[dest] = x*(-1)
elif op == 5:
regs[dest] = cos(y)
elif op == 6:
if y > 0:
regs[dest] = log(y)
elif op == 7:
regs[dest] = exp(y)
if isnan(regs[dest]):
regs[dest] = 0
elif regs[dest] == inf:
                regs[dest] = finfo(float64).max
            elif regs[dest] == NINF:
                regs[dest] = finfo(float64).min
"""
Code to calculate spin-orbit spillage.
Modified from JARVIS-Tools
https://www.nature.com/articles/s41598-019-45028-y
https://www.nature.com/articles/s41524-020-0319-4
"""
import numpy as np
from pymatgen.io.vasp.outputs import Wavecar
class SOCSpillage:
"""
    Spin-orbit spillage criterion to predict whether a material is topologically non-trivial.
    The spillage physically signifies the number of band-inverted electrons.
A non-zero, high value (generally >0.5) suggests non-trivial behavior
"""
def __init__(self, wf_noso="", wf_so=""):
"""
Requires path to WAVECAR files with and without LSORBIT = .TRUE.
Args:
wf_noso : WAVECAR without spin-orbit coupling
wf_so : WAVECAR with spin-orbit coupling
"""
self.wf_noso = wf_noso
self.wf_so = wf_so
@staticmethod
def isclose(n1, n2, rel_tol=1e-7):
"""
Checking if the numbers are close enough
"""
return abs(n1 - n2) < rel_tol
@staticmethod
def orth(A):
"""
Helper function to create orthonormal basis
"""
u, s, vh = np.linalg.svd(A, full_matrices=False)
M, N = A.shape
eps = np.finfo(float).eps
tol = max(M, N) * np.amax(s) * eps
num = np.sum(s > tol, dtype=int)
Q = u[:, :num]
return Q, num
def overlap_so_spinpol(self):
"""
Main function to calculate SOC spillage
"""
noso = Wavecar(self.wf_noso)
so = Wavecar(self.wf_so)
bcell = np.linalg.inv(noso.a).T
tmp = np.linalg.norm(np.dot(np.diff(noso.kpoints, axis=0), bcell), axis=1)
noso_k = np.concatenate(([0], np.cumsum(tmp)))
noso_bands = np.array(noso.band_energy)[:, :, :, 0]
noso_kvecs = np.array(noso.kpoints)
        noso_occs = np.array(noso.band_energy)[:, :, :, 2]
"""Helper functions."""
from dataclasses import dataclass
from importlib import import_module
from numbers import Integral
import shutil
from typing import (
Any,
Callable,
cast,
Iterable,
List,
Literal,
Optional,
overload,
Sequence,
Union,
)
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
import scipy.spatial
from morfeus.data import (
atomic_numbers,
atomic_symbols,
cov_radii_pyykko,
radii_alvarez,
radii_bondi,
radii_crc,
radii_rahm,
radii_truhlar,
)
from morfeus.typing import Array2D, ArrayLike1D, ArrayLike2D
def get_excluded_from_connectivity(
connectivity_matrix: ArrayLike2D,
center_atoms: ArrayLike1D,
connected_atoms: ArrayLike1D,
) -> List[int]:
"""Get atom indices to exclude bassed on connectivity and fragmentation.
Convenience function that determines atoms to exclude from a calculation of a larger
structure with multiple fragments. Connected atoms belong to the fragment of
interest, e.g., a ligand. Center atoms are those of e.g. a central metal atom that.
By default, the center atoms are added to the excluded ones.
Args:
connectivity_matrix: Connectivity matrix
center_atoms: Atoms of central unit which connects to fragment (1-indexed)
connected_atoms: Atoms of fragment (1-indexed)
Returns:
excluded_atoms: Atom indices to exclude
Raises:
ValueError: When connected atoms belong to different fragments or when connected
atoms belong to same fragment as other neighbors of center atoms (1-indexed)
"""
connectivity_matrix = np.array(connectivity_matrix)
center_atoms = np.array(center_atoms).reshape(-1) - 1
connected_atoms = np.array(connected_atoms).reshape(-1) - 1
    # Determine other neighbors of the central atoms
other_neighbors = set(
connectivity_matrix[center_atoms].reshape(-1).nonzero()[0]
).difference(connected_atoms)
# Calculate fragment labels
mask = np.ones(len(connectivity_matrix), dtype=bool)
mask[center_atoms] = False
graph = csr_matrix(connectivity_matrix)[mask, :][:, mask]
n_components, labels = connected_components(
csgraph=graph, directed=False, return_labels=True
)
# Take out labels and check for errors
connected_labels = set([labels[i] for i in connected_atoms])
if len(connected_labels) > 1:
raise ValueError("Connected atoms belong to different fragments.")
neighbor_labels = set([labels[i] for i in other_neighbors])
if len(neighbor_labels.intersection(connected_labels)) > 0:
raise ValueError(
"Connected atoms belong to same fragment as other neighbor of center atoms."
)
ref_label = list(connected_labels)[0]
excluded_atoms = list(np.where(labels != ref_label)[0] + 1)
return excluded_atoms
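# Example (hypothetical 1-indexed atom numbering): keep only the ligand fragment bound
# to a metal centre. Atom 1 is the metal, atom 2 is the ligand atom bonded to it; every
# atom not in atom 2's fragment (after removing the metal) is returned for exclusion.
#   excluded = get_excluded_from_connectivity(connectivity_matrix, [1], [2])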
def check_distances(
elements: Union[Iterable[int], Iterable[str]],
coordinates: ArrayLike2D,
check_atom: int,
radii: Optional[ArrayLike1D] = None,
check_radius: float = 0,
excluded_atoms: Optional[Sequence[int]] = None,
epsilon: float = 0,
radii_type: str = "crc",
) -> List[int]:
"""Check which atoms are within clashing vdW radii distances.
Args:
elements: Elements as atomic symbols or numbers
coordinates: Coordinates (Å)
check_atom: Index of atom to check against (1-indexed)
radii: vdW radii (Å)
check_radius: Radius to use for check_atom (Å)
excluded_atoms: Atom indices to exclude (1-indexed)
epsilon: Numeric term add to the radii (Å)
radii_type: Radii type: 'alvarez', 'bondi', 'crc', 'pyykko', 'rahm' or 'truhlar'
Returns:
within_list: Atom indices within vdW distance of check atom.
"""
# Convert elements to atomic numbers if the are symbols
elements = convert_elements(elements, output="numbers")
# Get radii if they are not supplied
if radii is None:
radii = get_radii(elements, radii_type=radii_type)
    radii = np.array(radii)
"""Implements the bilateral filter for images."""
from numpy import ceil, exp, dot, ogrid, arange
def bilateral_filter(im, size=None, sigma_r=None, sigma_d=1, **kwargs):
"""
Bilaterally filter an image. Uses Gaussian kernels for the spatial and intensity filters.
im is the image to filter, must be grayscale but can be any dimension
size is the kernel size, must be odd and >=3, defaults to int(max(5, 2*ceil(3*sigma_d)+1)).
sigma_r is the range/intensity standard deviation, defaults to image standard deviation.
sigma_d is the domain/spatial standard deviation, default to 1.
other keyword arguments are passed to scipy.ndimage.generic_filter.
    This attempts to use a Cython optimized function if possible. Additionally, in common cases many
    parts are precomputed to greatly speed up the function.
REFERENCES
1. <NAME> and <NAME>, 1998, "Bilateral filtering for gray and color images". Sixth
International Conference on Computer Vision. pp. 839–846.
2. <NAME> and <NAME>, 2008, "Enhancing Contrast in Color Images Using Bilateral Filter and
Histogram Equalization Using Wavelet Coefficients", 2008 Second International Conference on
Future Generation Communication and Networking Symposia.
"""
from scipy.ndimage import generic_filter
if sigma_r is None: sigma_r = im.std()
if size is None:
size = int(max(5, 2*ceil(3*sigma_d)+1))
elif size < 3 or size%2 != 1:
raise ValueError(size)
# Calculate the kernels
spatial, scale, inten_lut = __bilateral_kernels(im.dtype, im.ndim, size, sigma_r, sigma_d)
try:
# Try to import Cython optimized code - 20 to 75x faster
from scipy import LowLevelCallable
import hist.exact.__bilateral_cy as cy
_bilateral_filter = LowLevelCallable.from_cython(
cy, 'bilateral_filter' if inten_lut is None else 'bilateral_filter_inten_lut',
cy.get_user_data(spatial, scale, inten_lut)) # pylint: disable=c-extension-no-member
except ImportError:
# Fallback to pure Python function
# Note: it seems the pure Python function actually gets slower with the intensity LUT
def _bilateral_filter(data):
diff = data - data[data.size // 2]
weight = exp(diff*diff*scale) * spatial
return dot(data, weight) / weight.sum()
return generic_filter(im, _bilateral_filter, size, **kwargs)
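# A minimal usage sketch (hypothetical image): smooth a noisy grayscale image while
# preserving edges; sigma_r controls the intensity kernel, sigma_d the spatial kernel.
#   import numpy as np
#   im = np.random.rand(64, 64)
#   smoothed = bilateral_filter(im, size=5, sigma_r=0.1, sigma_d=1)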
def __bilateral_kernels(dt, ndim, size, sigma_r, sigma_d):
"""
Computes the spatial kernel and the intensity kernel scale. Also computes the intensity LUT if
it makes sense. If not None is returned in its place.
"""
from ..util import get_dtype_min_max
# Calculate the fixed spatial kernel
scale = -1/(2*sigma_d*sigma_d)
dist2 = sum(x*x for x in ogrid[(slice(-(size//2), size//2+1),)*ndim])
spatial = (dist2*scale).ravel()
exp(spatial, spatial)
spatial /= spatial.sum()
# Calculate the complete intensity LUT kernel if it makes sense
# Don't do this for 32-bit+ integral images or floating-point images
scale = -1/(2*sigma_r*sigma_r)
intensity_lut = None
if dt.kind in 'uib' and dt.itemsize <= 2:
mn, mx = get_dtype_min_max(dt)
intensity_lut = arange(0, mx-mn+1)
intensity_lut *= intensity_lut
intensity_lut = intensity_lut * scale
        exp(intensity_lut, intensity_lut)
    return spatial, scale, intensity_lut
import logging
import numpy as np
import pandas as pd
def feature_position(hdim1_indices,hdim2_indeces,region,track_data,threshold_i,position_threshold, target):
'''
function to determine feature position
Input:
hdim1_indices: list
hdim2_indeces: list
region: list
list of 2-element tuples
track_data: numpy.ndarray
2D numpy array containing the data
threshold_i: float
position_threshold: str
target: str
Output:
hdim1_index: float
feature position along 1st horizontal dimension
hdim2_index: float
feature position along 2nd horizontal dimension
'''
if position_threshold=='center':
# get position as geometrical centre of identified region:
hdim1_index=np.mean(hdim1_indices)
hdim2_index=np.mean(hdim2_indeces)
elif position_threshold=='extreme':
#get position as max/min position inside the identified region:
if target == 'maximum':
index=np.argmax(track_data[region])
hdim1_index=hdim1_indices[index]
hdim2_index=hdim2_indeces[index]
if target == 'minimum':
index=np.argmin(track_data[region])
hdim1_index=hdim1_indices[index]
hdim2_index=hdim2_indeces[index]
elif position_threshold=='weighted_diff':
# get position as centre of identified region, weighted by difference from the threshold:
weights=abs(track_data[region]-threshold_i)
if sum(weights)==0:
weights=None
hdim1_index=np.average(hdim1_indices,weights=weights)
        hdim2_index=np.average(hdim2_indeces,weights=weights)
from typing import Callable, Tuple, List, Union
# anaconda API
import numpy as np
from numpy.random import RandomState
from scipy.signal import lfilter
# custom types
from numpy import ndarray
from pandas import Series
# API
from tsoracle.API import Generator
from tsoracle import plotting, factor
# functional API
def noise(var: Union[float, int],
size: int,
random_state: float = None) -> ndarray:
""" Generate sequential noise from a random normal .
Parameters
----------
var: scalar float
Nosie variance level.
size: scalar int
Number of samples to generate, strictly positive.
random_state: scalar int, optional
Seed the random number generator
Returns
-------
noise: np.ndarray
Sequential noise.
"""
if size < 1:
raise ValueError('The value for size must be strictly positive')
if var == 0:
noise_signal = np.zeros(size)
else:
noise_signal = RandomState(random_state).normal(scale = np.sqrt(var),
size = size)
return noise_signal
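# Example: 100 samples of zero-mean Gaussian noise with variance 0.25, seeded for
# reproducibility.
#   samples = noise(var=0.25, size=100, random_state=42)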
def linear(intercept: float,
slope: float,
size: int,
var: float = 0.01,
random_state: float = None):
""" Generate linear signal plus noise.
Parameters
----------
intercept: scalar float
Intercept of linear signal.
slope: scalar float
Slope of linear signal.
size: scalar int
Number of samples to generate.
var: scalar float, optional
Nosie variance level.
random_state: scalar int, optional
Seed the random number generator
Returns
-------
signal: np.ndarray
Sequential linear signal.
"""
# check for input errors
if size < 1:
raise ValueError('The value for size must be strictly positive')
# generate time samples
time_index = np.arange(size)
# get noise
sig_noise = noise(var = var,
size = time_index.size,
random_state = random_state)
# calculate signal
signal = slope * time_index + intercept
return signal + sig_noise
def sinusoidal(mag: Union[float, ndarray, Series, List],
freq: Union[float, ndarray, Series, List],
shift: Union[float, ndarray, Series, List],
size: int,
var: float = 0.01,
random_state: float = None):
""" Generate sinusoidal signal plus noise.
Parameters
----------
mag: scalar or list-like
Signal magnitudes(ies).
freq: scalar or list-like
Signal frequency(ies).
shift: scalar or list-like
Phase shift(s).
size: scalar int
Number of samples to generate.
var: scalar float, optional
Nosie variance level.
random_state: scalar int, optional
Seed the random number generator.
Returns
-------
signal: np.ndarray
Sequential sinusoidal signal.
"""
mag = np.array(mag).reshape(np.array(mag).size, 1)
    freq = np.array(freq).reshape(np.array(freq).size, 1)
# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import numpy
import awkward1._util
import awkward1._connect._numpy
import awkward1.layout
import awkward1.operations.convert
def count(array, axis=None, keepdims=False, maskidentity=False):
layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
if axis is None:
def reduce(xs):
if len(xs) == 1:
return xs[0]
else:
return xs[0] + reduce(xs[1:])
return reduce([numpy.size(x) for x in awkward1._util.completely_flatten(layout)])
else:
behavior = awkward1._util.behaviorof(array)
return awkward1._util.wrap(layout.count(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
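# Example (assumes the awkward1 package is installed): count elements per sublist.
#   arr = awkward1.Array([[1.1, 2.2, 3.3], [], [4.4]])
#   count(arr, axis=1)   # -> [3, 0, 1]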
@awkward1._connect._numpy.implements(numpy.count_nonzero)
def count_nonzero(array, axis=None, keepdims=False, maskidentity=False):
layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
if axis is None:
def reduce(xs):
if len(xs) == 1:
return xs[0]
else:
return xs[0] + reduce(xs[1:])
return reduce([numpy.count_nonzero(x) for x in awkward1._util.completely_flatten(layout)])
else:
behavior = awkward1._util.behaviorof(array)
return awkward1._util.wrap(layout.count_nonzero(axis=axis, mask=maskidentity, keepdims=keepdims), behavior)
@awkward1._connect._numpy.implements(numpy.sum)
def sum(array, axis=None, keepdims=False, maskidentity=False):
layout = awkward1.operations.convert.tolayout(array, allowrecord=False, allowother=False)
if axis is None:
def reduce(xs):
if len(xs) == 1:
return xs[0]
else:
return xs[0] + reduce(xs[1:])
        return reduce([numpy.sum(x) for x in awkward1._util.completely_flatten(layout)])
import time
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import math as m
import numpy as np
import pandas as pd
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import math
from tensorflow import keras
from tensorflow.keras import layers
from random import shuffle
from keras import backend as K
import numpy as np
import keras.datasets
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
from sklearn import model_selection
import scipy.stats  # scipy.stats.sem is used below
import time
from footprints_and_cutouts import preprocess_scenes
(train_cutouts_blended_allScenes, train_cutouts_unblended_allScenes,
train_blended_mag_filters_allScenes, train_unblended_mag_filters_allScenes) = preprocess_scenes(train=True,use_pipeline_segmap=True)
(test_cutouts_blended_allScenes, test_cutouts_unblended_allScenes,
test_blended_mag_filters_allScenes, test_unblended_mag_filters_allScenes) = preprocess_scenes(train=False,use_pipeline_segmap=True)
train_cutouts_allScenes = []
train_cutouts_labels = []
train_mag_filter = []
count=0
for i in train_cutouts_unblended_allScenes:
train_cutouts_allScenes.append(i)
train_cutouts_labels.append([1,0])
if train_unblended_mag_filters_allScenes[21.5][count]:
train_mag_filter.append(21.5)
elif train_unblended_mag_filters_allScenes[22.5][count]:
train_mag_filter.append(22.5)
elif train_unblended_mag_filters_allScenes[23.5][count]:
train_mag_filter.append(23.5)
elif train_unblended_mag_filters_allScenes[24.5][count]:
train_mag_filter.append(24.5)
elif train_unblended_mag_filters_allScenes[25.5][count]:
train_mag_filter.append(25.5)
elif train_unblended_mag_filters_allScenes[26.5][count]:
train_mag_filter.append(26.5)
else:
train_mag_filter.append(0)
count+=1
count = 0
for i in train_cutouts_blended_allScenes:
train_cutouts_allScenes.append(i)
train_cutouts_labels.append([0,1])
if train_blended_mag_filters_allScenes[21.5][count]:
train_mag_filter.append(21.5)
elif train_blended_mag_filters_allScenes[22.5][count]:
train_mag_filter.append(22.5)
elif train_blended_mag_filters_allScenes[23.5][count]:
train_mag_filter.append(23.5)
elif train_blended_mag_filters_allScenes[24.5][count]:
train_mag_filter.append(24.5)
elif train_blended_mag_filters_allScenes[25.5][count]:
train_mag_filter.append(25.5)
elif train_blended_mag_filters_allScenes[26.5][count]:
train_mag_filter.append(26.5)
else:
train_mag_filter.append(0)
count+=1
test_cutouts_allScenes = []
test_cutouts_labels = []
test_mag_filter = []
count=0
for i in test_cutouts_unblended_allScenes:
test_cutouts_allScenes.append(i)
test_cutouts_labels.append([1,0])
if test_unblended_mag_filters_allScenes[21.5][count]:
test_mag_filter.append(21.5)
elif test_unblended_mag_filters_allScenes[22.5][count]:
test_mag_filter.append(22.5)
elif test_unblended_mag_filters_allScenes[23.5][count]:
test_mag_filter.append(23.5)
elif test_unblended_mag_filters_allScenes[24.5][count]:
test_mag_filter.append(24.5)
elif test_unblended_mag_filters_allScenes[25.5][count]:
test_mag_filter.append(25.5)
elif test_unblended_mag_filters_allScenes[26.5][count]:
test_mag_filter.append(26.5)
else:
test_mag_filter.append(0)
count+=1
count = 0
for i in test_cutouts_blended_allScenes:
test_cutouts_allScenes.append(i)
test_cutouts_labels.append([0,1])
if test_blended_mag_filters_allScenes[21.5][count]:
test_mag_filter.append(21.5)
elif test_blended_mag_filters_allScenes[22.5][count]:
test_mag_filter.append(22.5)
elif test_blended_mag_filters_allScenes[23.5][count]:
test_mag_filter.append(23.5)
elif test_blended_mag_filters_allScenes[24.5][count]:
test_mag_filter.append(24.5)
elif test_blended_mag_filters_allScenes[25.5][count]:
test_mag_filter.append(25.5)
elif test_blended_mag_filters_allScenes[26.5][count]:
test_mag_filter.append(26.5)
else:
test_mag_filter.append(0)
count+=1
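# Repeat the full train/evaluate cycle (23 runs): log-scale and min-max normalise the 23x23 cutouts,
# train the CNN, then tabulate per-magnitude accuracy and probability calibration.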
for _ in np.arange(23):
trainx,testx,trainy,testy,trainmag,testmag = train_cutouts_allScenes,test_cutouts_allScenes,train_cutouts_labels,test_cutouts_labels,train_mag_filter,test_mag_filter
trainx2 = np.log10(np.array(trainx)+10**-8)
testx2 = np.log10(np.array(testx)+10**-8)
trainxnorm = (trainx2 - np.min(trainx2))/(np.max(trainx2)-np.min(trainx2))
testxnorm = (testx2 - np.min(testx2))/(np.max(testx2)-np.min(testx2))
input_shape = (23, 23, 1)
num_classes=2
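    # Small CNN: two conv/max-pool blocks, then dense layers (800/400/200) with dropout and a 2-way softmax.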
model = keras.Sequential()
model.add(Conv2D(128, kernel_size=(3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(800,activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(400,activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(200,activation = 'relu'))
model.add(Dense(num_classes, activation="softmax"))
epochs=20
model.compile(loss="binary_crossentropy", optimizer='adam', metrics=["accuracy"])
time_start = time.time()
model.fit(np.reshape(trainxnorm,(len(trainx),23,23,1)), np.array(trainy), epochs=15,verbose=True,batch_size=200,validation_split = .1)
train_time = time.time()-time_start
mean_loss_xxx = model.evaluate(np.array(np.reshape(testxnorm,(len(testx),23,23,1))),np.array(testy))
bce=[]
count = 0
for i in testxnorm:
bce.append(model.evaluate(np.array(np.reshape(i,(1,23,23,1))),np.array([testy[count]]))[0])
count+=1
standard_error_bce = scipy.stats.sem(bce)
classified = []
blended_predict = []
count = []
for i in model.predict(np.reshape(testxnorm,(len(testx),23,23,1))):
if i[0]>i[1]:
classified.append([1,0])
else:
classified.append([0,1])
blended_predict.append(i[1])
blended_predict
arr_21 = []
arr_22 = []
arr_23 = []
arr_24 = []
arr_25 = []
arr_26 = []
count = 0
for i in np.array(testy)==classified:
if testmag[count] == 21.5:
arr_21.append([i[0],testy[count]])
if testmag[count] == 22.5:
arr_22.append([i[0],testy[count]])
if testmag[count] == 23.5:
arr_23.append([i[0],testy[count]])
if testmag[count] == 24.5:
arr_24.append([i[0],testy[count]])
if testmag[count] == 25.5:
arr_25.append([i[0],testy[count]])
if testmag[count] == 26.5:
arr_26.append([i[0],testy[count]])
count+=1
arr_21_unblended = []
arr_21_blended = []
for i in arr_21:
if i[1] == [1,0]:
arr_21_unblended.append(i[0])
else:
arr_21_blended.append(i[0])
arr_22_unblended = []
arr_22_blended = []
for i in arr_22:
if i[1] == [1,0]:
arr_22_unblended.append(i[0])
else:
arr_22_blended.append(i[0])
arr_23_unblended = []
arr_23_blended = []
for i in arr_23:
if i[1] == [1,0]:
arr_23_unblended.append(i[0])
else:
arr_23_blended.append(i[0])
arr_24_unblended = []
arr_24_blended = []
for i in arr_24:
if i[1] == [1,0]:
arr_24_unblended.append(i[0])
else:
arr_24_blended.append(i[0])
arr_25_unblended = []
arr_25_blended = []
for i in arr_25:
if i[1] == [1,0]:
arr_25_unblended.append(i[0])
else:
arr_25_blended.append(i[0])
arr_26_unblended = []
arr_26_blended = []
for i in arr_26:
if i[1] == [1,0]:
arr_26_unblended.append(i[0])
else:
arr_26_blended.append(i[0])
unblended = [['accuracy','# of samples', 'variance of # of accurately classified samples'],[np.count_nonzero(arr_21_unblended)/len(arr_21_unblended),len(arr_21_unblended)],
[np.count_nonzero(arr_22_unblended)/len(arr_22_unblended),len(arr_22_unblended)],
[np.count_nonzero(arr_23_unblended)/len(arr_23_unblended),len(arr_23_unblended)],
[np.count_nonzero(arr_24_unblended)/len(arr_24_unblended),len(arr_24_unblended)],
[np.count_nonzero(arr_25_unblended)/len(arr_25_unblended),len(arr_25_unblended)],
[np.count_nonzero(arr_26_unblended)/len(arr_26_unblended),len(arr_26_unblended)]]
blended = [['accuracy','# of samples', 'variance of # of accurately classified samples'],[np.count_nonzero(arr_21_blended)/len(arr_21_blended),len(arr_21_blended)],
[np.count_nonzero(arr_22_blended)/len(arr_22_blended),len(arr_22_blended)],
[np.count_nonzero(arr_23_blended)/len(arr_23_blended),len(arr_23_blended)],
[np.count_nonzero(arr_24_blended)/len(arr_24_blended),len(arr_24_blended)],
[np.count_nonzero(arr_25_blended)/len(arr_25_blended),len(arr_25_blended)],
[np.count_nonzero(arr_26_blended)/len(arr_26_blended),len(arr_26_blended)]]
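    # Append sqrt(n*p*(1-p)) -- the binomial standard deviation of the number of correctly
    # classified samples -- as a third column of each per-magnitude row.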
for i in unblended[1:]:
i.append(np.sqrt(i[0]*i[1]*(1-i[0])))
for i in blended[1:]:
i.append(np.sqrt(i[0]*i[1]*(1-i[0])))
unblended = np.array(unblended)
blended = np.array(blended)
    # list comprehensions instead of bare generators: np.sum(generator) is deprecated
    overall_blended_accuracy = np.sum([i[0]*i[1] for i in blended[1:].astype(float)])/np.sum([i[1] for i in blended[1:].astype(float)])
    overall_unblended_accuracy = np.sum([i[0]*i[1] for i in unblended[1:].astype(float)])/np.sum([i[1] for i in unblended[1:].astype(float)])
blended_predict_0 = []
blended_predict_1 = []
blended_predict_2 = []
blended_predict_3 = []
blended_predict_4 = []
blended_predict_5 = []
blended_predict_6 = []
blended_predict_7 = []
blended_predict_8 = []
blended_predict_9 = []
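    # Calibration check: bin the predicted blend probabilities into deciles, then compare the empirical
    # blended fraction (cal_*) against the mean and 16th/84th percentiles of the predictions per bin.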
count = 0
for i in blended_predict:
if i <.1:
blended_predict_0.append([[0,1]==testy[count],blended_predict[count]])
if i >=.1 and i<.2:
blended_predict_1.append([[0,1]==testy[count],blended_predict[count]])
if i >=.2 and i<.3:
blended_predict_2.append([[0,1]==testy[count],blended_predict[count]])
if i >=.3 and i<.4:
blended_predict_3.append([[0,1]==testy[count],blended_predict[count]])
if i >=.4 and i<.5:
blended_predict_4.append([[0,1]==testy[count],blended_predict[count]])
if i >=.5 and i<.6:
blended_predict_5.append([[0,1]==testy[count],blended_predict[count]])
if i >=.6 and i<.7:
blended_predict_6.append([[0,1]==testy[count],blended_predict[count]])
if i >=.7 and i<.8:
blended_predict_7.append([[0,1]==testy[count],blended_predict[count]])
if i >=.8 and i<.9:
blended_predict_8.append([[0,1]==testy[count],blended_predict[count]])
if i >=.9:
blended_predict_9.append([[0,1]==testy[count],blended_predict[count]])
count+=1
blended_predict_0 = np.array(blended_predict_0)
blended_predict_1 = np.array(blended_predict_1)
blended_predict_2 = np.array(blended_predict_2)
blended_predict_3 = np.array(blended_predict_3)
blended_predict_4 = np.array(blended_predict_4)
blended_predict_5 = np.array(blended_predict_5)
blended_predict_6 = np.array(blended_predict_6)
blended_predict_7 = np.array(blended_predict_7)
blended_predict_8 = np.array(blended_predict_8)
blended_predict_9 = np.array(blended_predict_9)
cal_0 = np.count_nonzero(blended_predict_0[:,0])/len(blended_predict_0)
cal_1 = np.count_nonzero(blended_predict_1[:,0])/len(blended_predict_1)
cal_2 = np.count_nonzero(blended_predict_2[:,0])/len(blended_predict_2)
cal_3 = np.count_nonzero(blended_predict_3[:,0])/len(blended_predict_3)
cal_4 = np.count_nonzero(blended_predict_4[:,0])/len(blended_predict_4)
cal_5 = np.count_nonzero(blended_predict_5[:,0])/len(blended_predict_5)
cal_6 = np.count_nonzero(blended_predict_6[:,0])/len(blended_predict_6)
cal_7 = np.count_nonzero(blended_predict_7[:,0])/len(blended_predict_7)
cal_8 = np.count_nonzero(blended_predict_8[:,0])/len(blended_predict_8)
cal_9 = np.count_nonzero(blended_predict_9[:,0])/len(blended_predict_9)
mean_0 = np.mean(blended_predict_0[:,1])
mean_1 = np.mean(blended_predict_1[:,1])
mean_2 = np.mean(blended_predict_2[:,1])
mean_3 = np.mean(blended_predict_3[:,1])
mean_4 = np.mean(blended_predict_4[:,1])
mean_5 = np.mean(blended_predict_5[:,1])
mean_6 = np.mean(blended_predict_6[:,1])
mean_7 = np.mean(blended_predict_7[:,1])
mean_8 = np.mean(blended_predict_8[:,1])
mean_9 = np.mean(blended_predict_9[:,1])
percentile_0 = [np.percentile(blended_predict_0[:,1],16),np.percentile(blended_predict_0[:,1],84)]
percentile_1 = [np.percentile(blended_predict_1[:,1],16),np.percentile(blended_predict_1[:,1],84)]
percentile_2 = [np.percentile(blended_predict_2[:,1],16),np.percentile(blended_predict_2[:,1],84)]
    percentile_3 = [np.percentile(blended_predict_3[:,1],16),np.percentile(blended_predict_3[:,1],84)]
"""
Functions to map between the coordinates in image pixel space and geographical coordinates.
"""
import logging
from typing import Tuple
from types import MethodType # for binding a method dynamically to a class
import numpy
from . import geocoords
from ..io.complex.sicd_elements.blocks import Poly2DType, XYZPolyType
from ..io.DEM.DEM import DTEDList, GeoidHeight, DTEDInterpolator
__classification__ = "UNCLASSIFIED"
__author__ = ("<NAME>", "<NAME>")
#############
# Ground-to-Image (aka Scene-to-Image) projection.
# noinspection PyUnusedLocal
def _validate_coords(coords, sicd):
if not isinstance(coords, numpy.ndarray):
coords = numpy.array(coords, dtype=numpy.float64)
orig_shape = coords.shape
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (1, -1))
if coords.shape[-1] != 3:
raise ValueError(
'The coords array must represent an array of points in ECF coordinates, '
'so the final dimension of coords must have length 3. Have coords.shape = {}'.format(coords.shape))
# TODO: possibly check for coordinates too far from the sicd box?
return coords, orig_shape
def _ground_to_image(coords, coa_proj, uGPN,
SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uProj,
row_col_transform, ipp_transform, delta_gp_max, max_iterations):
"""
Basic level helper function.
Parameters
----------
coords : numpy.ndarray|tuple|list
coa_proj : COAProjection
uGPN : numpy.ndarray
SCP : numpy.ndarray
SCP_Pixel : numpy.ndarray
uIPN : numpy.ndarray
sf : float
row_ss : float
col_ss : float
uProj : numpy.ndarray
row_col_transform : numpy.ndarray
ipp_transform : numpy.ndarray
delta_gp_max : float
max_iterations : int
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following SICD convention,
the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
g_n = coords.copy()
im_points = numpy.zeros((coords.shape[0], 2), dtype=numpy.float64)
delta_gpn = numpy.zeros((coords.shape[0],), dtype=numpy.float64)
cont = True
iteration = 0
matrix_transform = numpy.dot(row_col_transform, ipp_transform)
# (3 x 2)*(2 x 2) = (3 x 2)
while cont:
# TODO: is there any point in progressively stopping iteration?
# It doesn't really save much computation time.
# I set it to iterate over everything or nothing.
# project ground plane to image plane iteration
iteration += 1
dist_n = numpy.dot(SCP - g_n, uIPN)/sf # (N, )
i_n = g_n + numpy.outer(dist_n, uProj) # (N, 3)
delta_ipp = i_n - SCP # (N, 3)
ip_iter = numpy.dot(delta_ipp, matrix_transform) # (N, 2)
im_points[:, 0] = ip_iter[:, 0]/row_ss + SCP_Pixel[0]
im_points[:, 1] = ip_iter[:, 1]/col_ss + SCP_Pixel[1]
# transform to ground plane containing the scene points and check how it compares
p_n = _image_to_ground_plane(im_points, coa_proj, g_n, uGPN)
# compute displacement between scene point and this new projected point
diff_n = coords - p_n
disp_pn = numpy.linalg.norm(diff_n, axis=1)
# should we continue iterating?
        cont = numpy.any(disp_pn > delta_gp_max) and (iteration <= max_iterations)
if cont:
g_n += diff_n
return im_points, delta_gpn, iteration
def ground_to_image(coords, sicd, delta_gp_max=None, max_iterations=10, block_size=50000,
delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):
"""
Transforms a 3D ECF point to pixel (row/column) coordinates. This is
implemented in accordance with the SICD Image Projections Description Document.
**Really Scene-To-Image projection.**"
Parameters
----------
coords : numpy.ndarray|tuple|list
ECF coordinate to map to scene coordinates, of size `N x 3`.
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
delta_gp_max : float|None
Ground plane displacement tol (m). Defaults to 0.1*pixel.
max_iterations : int
maximum number of iterations to perform
block_size : int|None
size of blocks of coordinates to transform at a time
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
adj_params_frame : str
One of ['ECF', 'RIC_ECF', 'RIC_ECI'], specifying the coordinate frame used for
expressing `delta_arp` and `delta_varp` parameters.
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following
          the SICD convention, the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
coords, orig_shape = _validate_coords(coords, sicd)
row_ss = sicd.Grid.Row.SS
col_ss = sicd.Grid.Col.SS
pixel_size = numpy.sqrt(row_ss*row_ss + col_ss*col_ss)
if delta_gp_max is None:
delta_gp_max = 0.1*pixel_size
delta_gp_max = float(delta_gp_max)
if delta_gp_max < 0.01*pixel_size:
delta_gp_max = 0.01*pixel_size
logging.warning('delta_gp_max was less than 0.01*pixel_size, '
'and has been reset to {}'.format(delta_gp_max))
coa_proj = COAProjection(sicd, delta_arp, delta_varp, range_bias, adj_params_frame)
# establishing the basic projection components
SCP_Pixel = sicd.ImageData.SCPPixel.get_array()
uRow = sicd.Grid.Row.UVectECF.get_array() # unit normal in row direction
uCol = sicd.Grid.Col.UVectECF.get_array() # unit normal in column direction
uIPN = numpy.cross(uRow, uCol) # image plane unit normal
uIPN /= numpy.linalg.norm(uIPN) # NB: uRow/uCol may not be perpendicular
cos_theta = numpy.dot(uRow, uCol)
sin_theta = numpy.sqrt(1 - cos_theta*cos_theta)
ipp_transform = numpy.array([[1, -cos_theta], [-cos_theta, 1]], dtype=numpy.float64)/(sin_theta*sin_theta)
row_col_transform = numpy.zeros((3, 2), dtype=numpy.float64)
row_col_transform[:, 0] = uRow
row_col_transform[:, 1] = uCol
SCP = sicd.GeoData.SCP.ECF.get_array()
uGPN = sicd.PFA.FPN.get_array() if sicd.ImageFormation.ImageFormAlgo == 'PFA' \
else geocoords.wgs_84_norm(SCP)
ARP_SCP_COA = sicd.SCPCOA.ARPPos.get_array()
VARP_SCP_COA = sicd.SCPCOA.ARPVel.get_array()
uSPN = sicd.SCPCOA.look*numpy.cross(VARP_SCP_COA, SCP-ARP_SCP_COA)
uSPN /= numpy.linalg.norm(uSPN)
# uSPN - defined in section 3.1 as normal to instantaneous slant plane that contains SCP at SCP COA is
# tangent to R/Rdot contour at SCP. Points away from center of Earth. Use look to establish sign.
sf = float(numpy.dot(uSPN, uIPN)) # scale factor
# prepare the work space
coords_view = numpy.reshape(coords, (-1, 3)) # possibly or make 2-d flatten
num_points = coords_view.shape[0]
if block_size is None or num_points <= block_size:
image_points, delta_gpn, iters = _ground_to_image(
coords_view, coa_proj, uGPN,
SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uSPN,
row_col_transform, ipp_transform, delta_gp_max, max_iterations)
else:
image_points = numpy.zeros((num_points, 2), dtype=numpy.float64)
delta_gpn = numpy.zeros((num_points, ), dtype=numpy.float64)
iters = numpy.zeros((num_points, ), dtype=numpy.int16)
# proceed with block processing
start_block = 0
while start_block < num_points:
end_block = min(start_block+block_size, num_points)
image_points[start_block:end_block, :], delta_gpn[start_block:end_block], \
iters[start_block:end_block] = _ground_to_image(
coords_view[start_block:end_block, :], coa_proj, uGPN,
SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uSPN,
row_col_transform, ipp_transform, delta_gp_max, max_iterations)
start_block = end_block
if len(orig_shape) == 1:
image_points = numpy.reshape(image_points, (-1,))
elif len(orig_shape) > 1:
image_points = numpy.reshape(image_points, orig_shape[:-1]+(2, ))
delta_gpn = numpy.reshape(delta_gpn, orig_shape[:-1])
iters = numpy.reshape(iters, orig_shape[:-1])
return image_points, delta_gpn, iters
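# Example usage (illustrative sketch only; 'sicd_meta' is a hypothetical, already-loaded
# sarpy.io.complex.sicd_elements.SICD.SICDType instance and the coordinate is a placeholder):
#
#     scene_points = numpy.array([[6378137.0, 0.0, 0.0]])  # one ECF point (m)
#     image_points, residual_m, iterations = ground_to_image(scene_points, sicd_meta)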
def ground_to_image_geo(coords, sicd, **kwargs):
"""
Transforms a 3D Lat/Lon/HAE point to pixel (row/column) coordinates.
This is implemented in accordance with the SICD Image Projections Description Document.
Parameters
----------
coords : numpy.ndarray|tuple|list
Lat/Lon/HAE coordinate to map to scene coordinates, of size `N x 3`.
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
kwargs : dict
See the key word arguments of :func:`ground_to_image`
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following SICD convention,
the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
return ground_to_image(geocoords.geodetic_to_ecf(coords), sicd, **kwargs)
############
# Image-To-Ground projections
def _ric_ecf_mat(rarp, varp, frame_type):
"""
Computes the ECF transformation matrix for RIC frame.
Parameters
----------
rarp : numpy.ndarray
varp : numpy.ndarray
frame_type : str
the final three characters should be one of ['ECI', 'ECF']
Returns
-------
numpy.ndarray
the RIC transform matrix (array)
"""
# Angular velocity of earth in radians/second, not including precession
w = 7292115.1467E-11
typ = frame_type.upper()[-3:]
vi = varp if typ == 'ECF' else varp + numpy.cross([0, 0, w], rarp)
r = rarp/numpy.linalg.norm(rarp)
c = numpy.cross(r, vi)
c /= numpy.linalg.norm(c) # NB: perpendicular to r
i = numpy.cross(c, r)
# this is the cross of two perpendicular normal vectors, so normal
return numpy.array([r, i, c], dtype=numpy.float64)
class COAProjection(object):
"""
The COA projection object - provide common projection functionality for all Image-to-R/Rdot projection.
"""
def __init__(self, sicd, delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):
"""
Parameters
----------
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
The SICD metadata structure.
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
adj_params_frame : str
One of ['ECF', 'RIC_ECF', 'RIC_ECI'], specifying the coordinate frame used for
expressing `delta_arp` and `delta_varp` parameters.
"""
if not sicd.can_project_coordinates():
raise ValueError('Insufficient metadata populated to formulate projection.')
time_coa_poly = sicd.Grid.TimeCOAPoly
# fall back to approximation if TimeCOAPoly is not populated
if time_coa_poly is None:
time_coa_poly = Poly2DType(Coefs=[[sicd.Timeline.CollectDuration/2, ], ])
logging.warning(
'Using (constant) approximation to TimeCOAPoly, which may result in poor projection results.')
self.time_coa_poly = time_coa_poly # type: Poly2DType
self.arp_poly = sicd.Position.ARPPoly # type: XYZPolyType
self.varp_poly = self.arp_poly.derivative(der_order=1, return_poly=True) # type: XYZPolyType
self.row_ss = sicd.Grid.Row.SS # type: float
self.col_ss = sicd.Grid.Col.SS # type: float
self.first_row = sicd.ImageData.FirstRow # type: int
self.first_col = sicd.ImageData.FirstCol # type: int
self.scp_row = sicd.ImageData.SCPPixel.Row # type: int
self.scp_col = sicd.ImageData.SCPPixel.Col # type: int
if delta_arp is None:
delta_arp = numpy.array([0, 0, 0], dtype=numpy.float64)
if not isinstance(delta_arp, numpy.ndarray):
delta_arp = numpy.array(delta_arp, dtype=numpy.float64)
if delta_arp.shape != (3, ):
raise ValueError('delta_arp must have shape (3, ). Got {}'.format(delta_arp.shape))
if delta_varp is None:
delta_varp = numpy.array([0, 0, 0], dtype=numpy.float64)
if not isinstance(delta_varp, numpy.ndarray):
delta_varp = numpy.array(delta_varp, dtype=numpy.float64)
if delta_varp.shape != (3, ):
raise ValueError('delta_varp must have shape (3, ). Got {}'.format(delta_varp.shape))
if adj_params_frame in ['RIC_ECI', 'RIC_ECF']:
if sicd.SCPCOA.ARPPos is None or sicd.SCPCOA.ARPVel is None:
raise ValueError(
'The adj_params_frame is of RIC type, but one of SCPCOA.ARPPos or '
'SCPCOA.ARPVel is not populated.')
ARP_SCP_COA = sicd.SCPCOA.ARPPos.get_array()
VARP_SCP_COA = sicd.SCPCOA.ARPVel.get_array()
ric_matrix = _ric_ecf_mat(ARP_SCP_COA, VARP_SCP_COA, adj_params_frame)
delta_arp = ric_matrix.dot(delta_arp)
delta_varp = ric_matrix.dot(delta_varp)
self.delta_arp = delta_arp # type: numpy.ndarray
self.delta_varp = delta_varp # type: numpy.ndarray
if range_bias is None:
range_bias = 0.0
else:
range_bias = float(range_bias)
self.range_bias = range_bias # type: float
# bind the method specific intermediate projection method
self._method_proj = MethodType(_get_type_specific_projection(sicd), self)
def _init_proj(self, im_points):
"""
Parameters
----------
im_points : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray,...]
"""
row_meters = (im_points[:, 0] + self.first_row - self.scp_row)*self.row_ss
col_meters = (im_points[:, 1] + self.first_col - self.scp_col)*self.col_ss
t_coa = self.time_coa_poly(row_meters, col_meters)
# calculate aperture reference position and velocity at target time
arp_coa = self.arp_poly(t_coa)
varp_coa = self.varp_poly(t_coa)
return row_meters, col_meters, t_coa, arp_coa, varp_coa
def projection(self, im_points):
"""
Perform the projection from image coordinates to R/Rdot coordinates.
Parameters
----------
im_points : numpy.ndarray
This array of image point coordinates, **expected to have shape (N, 2)**.
Returns
-------
Tuple[numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray]
* `r_tgt_coa` - range to the ARP at COA
* `r_dot_tgt_coa` - range rate relative to the ARP at COA
* `t_coa` - center of aperture time since CDP start for input ip
* `arp_coa` - aperture reference position at t_coa
* `varp_coa` - velocity at t_coa
"""
row_meters, col_meters, t_coa, arp_coa, varp_coa = self._init_proj(im_points)
r_tgt_coa, r_dot_tgt_coa = self._method_proj(row_meters, col_meters, t_coa, arp_coa, varp_coa)
# adjust parameters (TODO: after all the calculations?)
arp_coa += self.delta_arp
varp_coa += self.delta_varp
r_tgt_coa += self.range_bias
return r_tgt_coa, r_dot_tgt_coa, t_coa, arp_coa, varp_coa
def _get_type_specific_projection(sicd):
"""
Gets an intermediate method specific projection method with six required
calling arguments (self, row_meters, col_meters, t_coa, arp_coa, varp_coa).
Parameters
----------
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
Returns
-------
callable
"""
# triple-nested function - it was conceptually clearest...
def pfa_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
pfa = sicd.PFA
polar_ang_poly = pfa.PolarAngPoly
spatial_freq_sf_poly = pfa.SpatialFreqSFPoly
polar_ang_poly_der = polar_ang_poly.derivative(der_order=1, return_poly=True)
spatial_freq_sf_poly_der = spatial_freq_sf_poly.derivative(der_order=1, return_poly=True)
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
PFA specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_SCP = arp_coa - SCP
rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
rDotSCPTgtCoa = numpy.sum(varp_coa * ARP_minus_SCP, axis=-1) / rSCPTgtCoa
thetaTgtCoa = polar_ang_poly(t_coa)
dThetaDtTgtCoa = polar_ang_poly_der(t_coa)
# Compute polar aperture scale factor (KSF) and derivative wrt polar angle
ksfTgtCoa = spatial_freq_sf_poly(thetaTgtCoa)
dKsfDThetaTgtCoa = spatial_freq_sf_poly_der(thetaTgtCoa)
# Compute spatial frequency domain phase slopes in Ka and Kc directions
# NB: sign for the phase may be ignored as it is cancelled in a subsequent computation.
dPhiDKaTgtCoa = row_meters * numpy.cos(thetaTgtCoa) + col_meters * numpy.sin(thetaTgtCoa)
dPhiDKcTgtCoa = -row_meters * numpy.sin(thetaTgtCoa) + col_meters * numpy.cos(thetaTgtCoa)
# Compute range relative to SCP
deltaRTgtCoa = ksfTgtCoa * dPhiDKaTgtCoa
# Compute derivative of range relative to SCP wrt polar angle.
# Scale by derivative of polar angle wrt time.
dDeltaRDThetaTgtCoa = dKsfDThetaTgtCoa * dPhiDKaTgtCoa + ksfTgtCoa * dPhiDKcTgtCoa
deltaRDotTgtCoa = dDeltaRDThetaTgtCoa * dThetaDtTgtCoa
return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
return method_projection
def rgazcomp_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
az_sf = sicd.RgAzComp.AzSF
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
RgAzComp specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_SCP = arp_coa - SCP
rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
rDotSCPTgtCoa = numpy.sum(varp_coa*ARP_minus_SCP, axis=-1)/rSCPTgtCoa
deltaRTgtCoa = row_meters
deltaRDotTgtCoa = -numpy.linalg.norm(varp_coa, axis=-1)*az_sf*col_meters
return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
return method_projection
def inca_projection():
inca = sicd.RMA.INCA
r_ca_scp = inca.R_CA_SCP
time_ca_poly = inca.TimeCAPoly
drate_sf_poly = inca.DRateSFPoly
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
INCA specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
# compute range/time of closest approach
R_CA_TGT = r_ca_scp + row_meters # Range at closest approach
t_CA_TGT = time_ca_poly(col_meters) # Time of closest approach
# Compute ARP velocity magnitude (actually squared, since that's how it's used) at t_CA_TGT
VEL2_CA_TGT = numpy.sum(instance.varp_poly(t_CA_TGT)**2, axis=-1)
# Compute the Doppler Rate Scale Factor for image Grid location
DRSF_TGT = drate_sf_poly(row_meters, col_meters)
# Difference between COA time and CA time
dt_COA_TGT = t_coa - t_CA_TGT
r_tgt_coa = numpy.sqrt(R_CA_TGT*R_CA_TGT + DRSF_TGT*VEL2_CA_TGT*dt_COA_TGT*dt_COA_TGT)
r_dot_tgt_coa = (DRSF_TGT/r_tgt_coa)*VEL2_CA_TGT*dt_COA_TGT
return r_tgt_coa, r_dot_tgt_coa
return method_projection
def plane_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
uRow = sicd.Grid.Row.UVectECF.get_array()
        uCol = sicd.Grid.Col.UVectECF.get_array()
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):
"""
Plane specific intermediate projection.
Parameters
----------
row_meters : numpy.ndarray
col_meters : numpy.ndarray
t_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_IPP = arp_coa - (SCP + numpy.outer(row_meters, uRow) + numpy.outer(col_meters, uCol))
r_tgt_coa = numpy.linalg.norm(ARP_minus_IPP, axis=-1)
r_dot_tgt_coa = numpy.sum(varp_coa * ARP_minus_IPP, axis=-1)/r_tgt_coa
return r_tgt_coa, r_dot_tgt_coa
return method_projection
# NB: sicd.can_project_coordinates() has been called, so all required attributes
# must be populated
if sicd.Grid.Type == 'RGAZIM':
if sicd.ImageFormation.ImageFormAlgo == 'PFA':
return pfa_projection()
elif sicd.ImageFormation.ImageFormAlgo == 'RGAZCOMP':
return rgazcomp_projection()
elif sicd.Grid.Type == 'RGZERO':
return inca_projection()
elif sicd.Grid.Type in ['XRGYCR', 'XCTYAT', 'PLANE']:
return plane_projection()
else:
# NB: this will have been noted by sicd.can_project_coordinates(), but is
# here for completeness
        raise ValueError('Unhandled Grid.Type {}'.format(sicd.Grid.Type))
def _validate_im_points(im_points, sicd):
"""
Parameters
----------
im_points : numpy.ndarray|list|tuple
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
Returns
-------
numpy.ndarray
"""
if im_points is None:
raise ValueError('The argument cannot be None')
if not isinstance(im_points, numpy.ndarray):
im_points = numpy.array(im_points, dtype=numpy.float64)
orig_shape = im_points.shape
if len(im_points.shape) == 1:
im_points = numpy.reshape(im_points, (1, -1))
if im_points.shape[-1] != 2:
raise ValueError(
'The im_points array must represent an array of points in pixel coordinates, '
'so the final dimension of im_points must have length 2. Have im_points.shape = {}'.format(im_points.shape))
# check to ensure that the entries of im_points are not ridiculous
rows = sicd.ImageData.NumRows
cols = sicd.ImageData.NumCols
row_bounds = (-rows/2, 3*rows/2)
col_bounds = (-cols/2, 3*cols/2)
if numpy.any(
(im_points[:, 0] < row_bounds[0]) | (im_points[:, 0] > row_bounds[1]) |
(im_points[:, 1] < col_bounds[0]) | (im_points[:, 1] > col_bounds[1])):
raise ValueError(
            'The sicd has {} rows and {} cols. image_to_ground projection effort '
'requires row coordinates in the range {} and column coordinates '
'in the range {}'.format(rows, cols, row_bounds, col_bounds))
return im_points, orig_shape
def image_to_ground(im_points, sicd, block_size=50000, projection_type='HAE', **kwargs):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
(row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
Following SICD convention, the upper-left pixel is [0, 0].
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array will be
transformed as a single block if `None`.
projection_type : str
One of ['PLANE', 'HAE', 'DEM'].
kwargs : dict
keyword arguments relevant for the given projection type. See image_to_ground_plane/hae/dem methods.
Returns
-------
numpy.ndarray
        Physical coordinates (in ECF) corresponding to the input image coordinates. The interpretation
or meaning of the physical coordinates depends on `projection_type` chosen.
"""
p_type = projection_type.upper()
if p_type == 'PLANE':
return image_to_ground_plane(im_points, sicd, block_size=block_size, **kwargs)
elif p_type == 'HAE':
return image_to_ground_hae(im_points, sicd, block_size=block_size, **kwargs)
elif p_type == 'DEM':
return image_to_ground_dem(im_points, sicd, block_size=block_size, **kwargs)
else:
raise ValueError('Got unrecognized projection type {}'.format(projection_type))
def image_to_ground_geo(im_points, sicd, **kwargs):
"""
Transforms image coordinates to ground plane Lat/Lon/HAE coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
(row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
Following SICD convention, the upper-left pixel is [0, 0].
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
SICD meta data structure.
kwargs : dict
See the keyword arguments in :func:`image_to_ground`.
Returns
-------
numpy.ndarray
Ground Plane Point (in Lat/Lon/HAE coordinates) along the R/Rdot contour.
"""
return geocoords.ecf_to_geodetic(image_to_ground(im_points, sicd, **kwargs))
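# Example usage (illustrative sketch only; 'sicd_meta' is a hypothetical SICD structure and the
# pixel coordinates are placeholders):
#
#     pixels = [[0, 0], [100, 250]]
#     ecf_points = image_to_ground(pixels, sicd_meta, projection_type='HAE')
#     llh_points = image_to_ground_geo(pixels, sicd_meta, projection_type='HAE')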
#####
# Image-to-Ground Plane
def _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ):
"""
Parameters
----------
    r_tgt_coa : numpy.ndarray
    r_dot_tgt_coa : numpy.ndarray
    arp_coa : numpy.ndarray
    varp_coa : numpy.ndarray
    gref : numpy.ndarray
    uZ : numpy.ndarray
Returns
-------
numpy.ndarray
"""
# Solve for the intersection of a R/Rdot contour and a ground plane.
arpZ = numpy.sum((arp_coa - gref)*uZ, axis=-1)
arpZ[arpZ > r_tgt_coa] = numpy.nan
# ARP ground plane nadir
aGPN = arp_coa - numpy.outer(arpZ, uZ)
# Compute ground plane distance (gd) from ARP nadir to circle of const range
gd = numpy.sqrt(r_tgt_coa*r_tgt_coa - arpZ*arpZ)
# Compute sine and cosine of grazing angle
cosGraz = gd/r_tgt_coa
sinGraz = arpZ/r_tgt_coa
# Velocity components normal to ground plane and parallel to ground plane.
vMag = numpy.linalg.norm(varp_coa, axis=-1)
vZ = numpy.dot(varp_coa, uZ)
vX = numpy.sqrt(vMag*vMag - vZ*vZ) # Note: For Vx = 0, no Solution
# Orient X such that Vx > 0 and compute unit vectors uX and uY
uX = ((varp_coa - numpy.outer(vZ, uZ)).T/vX).T
uY = numpy.cross(uZ, uX)
# Compute cosine of azimuth angle to ground plane point
cosAz = (-r_dot_tgt_coa+vZ*sinGraz) / (vX * cosGraz)
cosAz[numpy.abs(cosAz) > 1] = numpy.nan # R/Rdot combination not possible in given plane
# Compute sine of azimuth angle. Use LOOK to establish sign.
look = numpy.sign(numpy.dot(numpy.cross(arp_coa-gref, varp_coa), uZ))
sinAz = look * numpy.sqrt(1-cosAz*cosAz)
# Compute Ground Plane Point in ground plane and along the R/Rdot contour
return aGPN + (uX.T*gd*cosAz + uY.T*gd*sinAz).T
def _image_to_ground_plane(im_points, coa_projection, gref, uZ):
"""
Parameters
----------
im_points : numpy.ndarray
coa_projection : COAProjection
gref : numpy.ndarray
uZ : numpy.ndarray
Returns
-------
numpy.ndarray
"""
r_tgt_coa, r_dot_tgt_coa, t_coa, arp_coa, varp_coa = coa_projection.projection(im_points)
return _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ)
def image_to_ground_plane(im_points, sicd, block_size=50000, gref=None, ugpn=None, **coa_args):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
the image coordinate array
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
the SICD metadata structure.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array will be
transformed as a single block if `None`.
gref : None|numpy.ndarray|list|tuple
Ground plane reference point ECF coordinates (m). The default is the SCP
ugpn : None|numpy.ndarray|list|tuple
Vector normal to the plane to which we are projecting.
coa_args : dict
keyword arguments for COAProjection constructor.
Returns
-------
numpy.ndarray
Ground Plane Point (in ECF coordinates) corresponding to the input image coordinates.
"""
# method parameter validation
if gref is None:
gref = sicd.GeoData.SCP.ECF.get_array()
if ugpn is None:
ugpn = sicd.PFA.FPN.get_array() if sicd.ImageFormation.ImageFormAlgo == 'PFA' \
else geocoords.wgs_84_norm(gref)
if len(ugpn.shape) == 2:
ugpn = numpy.reshape(ugpn, (3, ))
uZ = ugpn/numpy.linalg.norm(ugpn)
# coa projection creation
im_points, orig_shape = _validate_im_points(im_points, sicd)
coa_proj = COAProjection(sicd, **coa_args)
# prepare workspace
im_points_view = numpy.reshape(im_points, (-1, 2)) # possibly or make 2-d flatten
num_points = im_points_view.shape[0]
if block_size is None or num_points <= block_size:
coords = _image_to_ground_plane(im_points_view, coa_proj, gref, uZ)
else:
coords = numpy.zeros((num_points, 3), dtype=numpy.float64)
# proceed with block processing
start_block = 0
while start_block < num_points:
end_block = min(start_block + block_size, num_points)
coords[start_block:end_block, :] = _image_to_ground_plane(
im_points_view[start_block:end_block], coa_proj, gref, uZ)
start_block = end_block
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (-1, ))
elif len(orig_shape) > 1:
coords = numpy.reshape(coords, orig_shape[:-1] + (3,))
return coords
#####
# Image-to-HAE
def _image_to_ground_hae_perform(
r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, SCP, ugpn,
hae0, delta_hae_max, hae_nlim, scp_hae):
"""
Intermediate helper method.
Parameters
----------
r_tgt_coa : numpy.ndarray
r_dot_tgt_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
SCP : numpy.ndarray
ugpn : numpy.ndarray
hae0 : float
delta_hae_max : float
hae_nlim : int
scp_hae : float
Returns
-------
numpy.ndarray
"""
# Compute the geodetic ground plane normal at the SCP.
look = numpy.sign(numpy.sum(numpy.cross(arp_coa, varp_coa)*(SCP-arp_coa), axis=1))
gref = SCP - (scp_hae - hae0)*ugpn
# iteration variables
gpp = None
delta_hae = None
cont = True
iters = 0
while cont:
iters += 1
# Compute the precise projection along the R/Rdot contour to Ground Plane.
gpp = _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, ugpn)
# check our hae value versus hae0
gpp_llh = geocoords.ecf_to_geodetic(gpp)
delta_hae = gpp_llh[:, 2] - hae0
        abs_delta_hae = numpy.abs(delta_hae)
""" Module that uses CMAC 2.0 to remove and correct second trip returns,
correct velocity and more. A new radar object is then created with all CMAC
2.0 products. """
import copy
import json
import sys
import netCDF4
import numpy as np
import pyart
from .cmac_processing import (
do_my_fuzz, get_melt, get_texture, fix_phase_fields, gen_clutter_field_from_refl, beam_block)
from .config import get_cmac_values, get_field_names, get_metadata
def cmac(radar, sonde, config, geotiff=None, flip_velocity=False,
meta_append=None, verbose=True):
"""
Corrected Moments in Antenna Coordinates
Parameters
----------
radar : Radar
Radar object to use in the CMAC calculation.
sonde : Object
Object containing all the sonde data.
config : str
A string pointing to dictionaries containing values for CMAC 2.0
specific to a radar.
Other Parameters
----------------
geotiff : str
Filepath for a geotiff, if provided, will generate a beam blockage
gate id.
meta_append : dict, json and None
Value key pairs to attend to global attributes. If None,
a default metadata will be created. The metadata can also
be created by providing a dictionary or a json file.
verbose : bool
If True, this will display more statistics.
Returns
-------
radar : Radar
Radar object with new CMAC added fields.
"""
# Retrieve values from the configuration file.
cmac_config = get_cmac_values(config)
field_config = get_field_names(config)
meta_config = get_metadata(config)
    # Overwrite site altitude
if 'site_alt' in cmac_config.keys():
radar.altitude['data'][0] = cmac_config['site_alt']
# Obtaining variables needed for fuzzy logic.
radar_start_date = netCDF4.num2date(
radar.time['data'][0], radar.time['units'],
only_use_cftime_datetimes=False, only_use_python_datetimes=True)
print('##', str(radar_start_date))
temp_field = field_config['temperature']
alt_field = field_config['altitude']
vel_field = field_config['velocity']
if 'gen_clutter_from_refl' not in cmac_config.keys():
cmac_config['gen_clutter_from_refl'] = False
if cmac_config['gen_clutter_from_refl']:
new_clutter_field = gen_clutter_field_from_refl(
radar, field_config['input_clutter_corrected_reflectivity'],
field_config['reflectivity'],
diff_dbz=cmac_config['gen_clutter_from_refl_diff'],
max_h=cmac_config['gen_clutter_from_refl_alt'])
radar.add_field(
field_config['clutter'], new_clutter_field, replace_existing=True)
radar.fields[field_config['clutter']]['units'] = '1'
radar.fields[field_config['clutter']]['valid_max'] = 1
radar.fields[field_config['clutter']]['valid_min'] = 0
# ZDR offsets
if 'zdr_offset' in cmac_config.keys():
if 'offset_zdrs' in cmac_config.keys():
for fld in cmac_config['offset_zdrs']:
radar.fields[fld]['data'] += cmac_config['zdr_offset']
else:
radar.fields[
field_config['input_zdr']]['data'] += cmac_config['zdr_offset']
# flipping phidp
if 'flip_phidp' not in cmac_config.keys():
cmac_config['flip_phidp'] = False
if cmac_config['flip_phidp']:
# user specifies fields to flip
if 'phidp_flipped' in cmac_config.keys():
for fld in cmac_config['phidp_flipped']:
radar.fields[fld]['data'] = radar.fields[fld]['data'] * -1.0
else: # just flip defined phidp field
radar.fields[
field_config['input_phidp_field']]['data'] = radar.fields[
field_config['input_phidp_field']]['data']*-1.0
if flip_velocity:
radar.fields[vel_field]['data'] = radar.fields[
vel_field]['data'] * -1.0
z_dict, temp_dict = pyart.retrieve.map_profile_to_gates(
sonde.variables[temp_field][:], sonde.variables[alt_field][:], radar)
if 'clutter_mask_z_for_texture' not in cmac_config.keys():
cmac_config['clutter_mask_z_for_texture'] = False
if cmac_config['clutter_mask_z_for_texture']:
masked_vr = copy.deepcopy(radar.fields[vel_field])
if 'ground_clutter' in radar.fields.keys():
masked_vr['data'] = np.ma.masked_where(radar.fields['ground_clutter']['data'] == 1, masked_vr['data'])
masked_vr['data'][radar.fields['ground_clutter']['data'] == 1] = np.nan
radar.add_field('clutter_masked_velocity', masked_vr, replace_existing=True)
texture = get_texture(radar, 'clutter_masked_velocity')
texture['data'][np.isnan(texture['data'])] = 0.0
else:
texture = get_texture(radar, vel_field)
snr = pyart.retrieve.calculate_snr_from_reflectivity(radar)
if not verbose:
print('## Adding radar fields...')
if verbose:
print('##')
print('## These radar fields are being added:')
temp_dict['units'] = 'degC'
z_dict['units'] = 'm'
radar.add_field('sounding_temperature', temp_dict, replace_existing=True)
radar.add_field('height', z_dict, replace_existing=True)
radar.add_field('signal_to_noise_ratio', snr, replace_existing=True)
radar.add_field('velocity_texture', texture, replace_existing=True)
if verbose:
print('## sounding_temperature')
print('## height')
print('## signal_to_noise_ratio')
print('## velocity_texture')
# Performing fuzzy logic to obtain the gate ids.
rhv_field = field_config['cross_correlation_ratio']
ncp_field = field_config['normalized_coherent_power']
if 'mbfs' not in cmac_config:
cmac_config['mbfs'] = None
if 'hard_const' not in cmac_config:
cmac_config['hard_const'] = None
# Specifically for dealing with the ingested C-SAPR2 data
my_fuzz, _ = do_my_fuzz(radar, rhv_field, ncp_field, verbose=verbose,
custom_mbfs=cmac_config['mbfs'],
custom_hard_constraints=cmac_config['hard_const'])
radar.add_field('gate_id', my_fuzz,
replace_existing=True)
if 'ground_clutter' in radar.fields.keys():
# Adding fifth gate id, clutter.
clutter_data = radar.fields['ground_clutter']['data']
gate_data = radar.fields['gate_id']['data'].copy()
radar.fields['gate_id']['data'][clutter_data == 1] = 5
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',5:clutter'
radar.fields['gate_id']['valid_max'] = 5
        radar.fields['gate_id']['valid_min'] = 0
if 'classification_mask' in radar.fields.keys():
clutter_data = radar.fields['classification_mask']['data']
gate_data = radar.fields['gate_id']['data'].copy()
radar.fields['gate_id']['data'][clutter_data == 8] = 5
radar.fields['gate_id']['data'][clutter_data == 16] = 5
radar.fields['gate_id']['data'][clutter_data == 4] = 5
radar.fields['gate_id']['data'][clutter_data == 1] = 0
radar.fields['gate_id']['data'][clutter_data == 2] = 0
radar.fields['gate_id']['data'][gate_data == 0] = 0
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',5:clutter'
radar.fields['gate_id']['valid_max'] = 5
        radar.fields['gate_id']['valid_min'] = 0
if geotiff is not None:
pbb_all, cbb_all = beam_block(
radar, geotiff, cmac_config['radar_height_offset'],
cmac_config['beam_width'])
radar.fields['gate_id']['data'][cbb_all > 0.30] = 6
notes = radar.fields['gate_id']['notes']
radar.fields['gate_id']['notes'] = notes + ',6:terrain_blockage'
radar.fields['gate_id']['valid_max'] = 6
pbb_dict = pbb_to_dict(pbb_all)
cbb_dict = cbb_to_dict(cbb_all)
radar.add_field('partial_beam_blockage', pbb_dict)
radar.add_field('cumulative_beam_blockage', cbb_dict)
cat_dict = {}
for pair_str in radar.fields['gate_id']['notes'].split(','):
cat_dict.update(
{pair_str.split(':')[1]:int(pair_str.split(':')[0])})
if verbose:
print('## gate_id')
# Corrected velocity using pyart's region dealiaser.
cmac_gates = pyart.correct.GateFilter(radar)
cmac_gates.exclude_all()
cmac_gates.include_equal('gate_id', cat_dict['rain'])
cmac_gates.include_equal('gate_id', cat_dict['melting'])
cmac_gates.include_equal('gate_id', cat_dict['snow'])
# Create a simulated velocity field from the sonde object.
u_field = field_config['u_wind']
v_field = field_config['v_wind']
u_wind = sonde.variables[u_field][:]
v_wind = sonde.variables[v_field][:]
alt_field = field_config['altitude']
sonde_alt = sonde.variables[alt_field][:]
profile = pyart.core.HorizontalWindProfile.from_u_and_v(
sonde_alt, u_wind, v_wind)
sim_vel = pyart.util.simulated_vel_from_profile(radar, profile)
radar.add_field('simulated_velocity', sim_vel, replace_existing=True)
# Create the corrected velocity field from the region dealias algorithm.
corr_vel = pyart.correct.dealias_region_based(
radar, vel_field=vel_field, ref_vel_field='simulated_velocity',
keep_original=False, gatefilter=cmac_gates, centered=True)
radar.add_field('corrected_velocity', corr_vel, replace_existing=True)
if verbose:
print('## corrected_velocity')
print('## simulated_velocity')
fzl = get_melt(radar)
# Is the freezing level realistic? If not, assume
ref_offset = cmac_config['ref_offset']
self_const = cmac_config['self_const']
# Calculating differential phase fields.
radar.fields['differential_phase']['data'][
radar.fields['differential_phase']['data']<0] += 360.0
phidp, kdp = pyart.correct.phase_proc_lp_gf(
radar, gatefilter=cmac_gates, offset=ref_offset, debug=True,
nowrap=50, fzl=fzl, self_const=self_const)
phidp_filt, kdp_filt = fix_phase_fields(
copy.deepcopy(kdp), copy.deepcopy(phidp), radar.range['data'],
cmac_gates)
radar.add_field('corrected_differential_phase', phidp,
replace_existing=True)
radar.fields['corrected_differential_phase']['long_name'] = 'Corrected differential propagation phase shift'
radar.add_field('filtered_corrected_differential_phase', phidp_filt,
replace_existing=True)
radar.fields[
'filtered_corrected_differential_phase']['long_name'] = 'Filtered corrected differential propagation phase shift'
radar.add_field('corrected_specific_diff_phase', kdp,
replace_existing=True)
radar.add_field('filtered_corrected_specific_diff_phase', kdp_filt,
replace_existing=True)
radar.fields[
'filtered_corrected_specific_diff_phase']['long_name'] = 'Filtered Corrected Specific differential phase (KDP)'
radar.fields['filtered_corrected_differential_phase']['long_name'] = 'Filtered Corrected Differential Phase'
if 'clutter_masked_velocity' in radar.fields.keys():
radar.fields['clutter_masked_velocity']['long_name'] = 'Radial mean Doppler velocity, positive for motion away from the instrument, clutter removed'
if verbose:
print('## corrected_specific_diff_phase')
print('## filtered_corrected_specific_diff_phase')
print('## corrected_differential_phase')
print('## filtered_corrected_differential_phase')
# Calculating attenuation by using pyart.
refl_field = field_config['reflectivity']
attenuation_a_coef = cmac_config['attenuation_a_coef']
c_coef = cmac_config['c_coef']
d_coef = cmac_config['d_coef']
beta_coef = cmac_config['beta_coef']
rr_a = cmac_config['rain_rate_a_coef']
rr_b = cmac_config['rain_rate_b_coef']
zdr_field = field_config['differential_reflectivity']
radar.fields['corrected_differential_reflectivity'] = copy.deepcopy(
radar.fields[zdr_field])
radar.fields['corrected_reflectivity'] = copy.deepcopy(
radar.fields[refl_field])
radar.fields['corrected_reflectivity']['data'] = np.ma.masked_where(
cmac_gates.gate_excluded,
radar.fields['corrected_reflectivity']['data'])
# Get specific differential attenuation.
# Need height over 0C isobar.
iso0 = np.ma.mean(radar.fields['height']['data'][
np.where(np.abs(radar.fields['sounding_temperature']['data']) < 0.1)])
radar.fields['height_over_iso0'] = copy.deepcopy(radar.fields['height'])
radar.fields['height_over_iso0']['data'] -= iso0
radar.fields['height_over_iso0']['long_name'] = 'Height of radar beam over freezing level'
phidp_field = field_config['phidp_field']
(spec_at, pia_dict, cor_z, spec_diff_at,
pida_dict, cor_zdr) = pyart.correct.calculate_attenuation_zphi(
radar, temp_field='sounding_temperature',
iso0_field='height_over_iso0',
zdr_field=field_config['zdr_field'],
pia_field=field_config['pia_field'],
phidp_field=field_config['phidp_field'],
refl_field=field_config['refl_field'], c=c_coef, d=d_coef,
a_coef=attenuation_a_coef, beta=beta_coef,
gatefilter=cmac_gates)
# cor_zdr['data'] += cmac_config['zdr_offset'] Now taken care of at start
radar.add_field('specific_attenuation', spec_at, replace_existing=True)
radar.add_field('path_integrated_attenuation', pia_dict,
replace_existing=True)
radar.add_field('corrected_reflectivity', cor_z, replace_existing=True)
radar.add_field('specific_differential_attenuation', spec_diff_at,
replace_existing=True)
radar.add_field('path_integrated_differential_attenuation', pida_dict,
replace_existing=True)
radar.add_field('corrected_differential_reflectivity', cor_zdr,
replace_existing=True)
radar.fields['corrected_velocity']['units'] = 'm/s'
radar.fields['corrected_velocity']['valid_min'] = np.round(
radar.fields['corrected_velocity']['valid_min'], 4)
radar.fields['corrected_velocity']['valid_max'] = np.round(
radar.fields['corrected_velocity']['valid_max'], 4)
radar.fields['simulated_velocity']['units'] = 'm/s'
radar.fields['velocity_texture']['units'] = 'm/s'
radar.fields['unfolded_differential_phase']['long_name'] = 'Unfolded differential propagation phase shift'
cat_dict = {}
for pair_str in radar.fields['gate_id']['notes'].split(','):
if verbose:
print(pair_str)
cat_dict.update({pair_str.split(':')[1]: int(pair_str.split(':')[0])})
rain_gates = pyart.correct.GateFilter(radar)
rain_gates.exclude_all()
rain_gates.include_equal('gate_id', cat_dict['rain'])
# Calculating rain rate.
R = rr_a * (radar.fields['specific_attenuation']['data']) ** rr_b
rainrate = copy.deepcopy(radar.fields['specific_attenuation'])
rainrate['data'] = R
rainrate['valid_min'] = 0.0
rainrate['valid_max'] = 400.0
rainrate['standard_name'] = 'rainfall_rate'
rainrate['long_name'] = 'rainfall_rate'
rainrate['least_significant_digit'] = 1
rainrate['units'] = 'mm/hr'
radar.fields.update({'rain_rate_A': rainrate})
# This needs to be updated to a gatefilter.
mask = radar.fields['reflectivity']['data'].mask
radar.fields['rain_rate_A'].update({
'comment': 'Rain rate calculated from specific_attenuation,'
+ ' R=51.3*specific_attenuation**0.81, note R=0.0 where'
+ ' norm coherent power < 0.4 or rhohv < 0.8'})
if verbose:
print('## Rainfall rate as a function of A ##')
print('##')
print('## All CMAC fields have been added to the radar object.')
print('##')
# Adding the metadata to the cmac radar object.
print('## Appending metadata')
command_line = ''
for item in sys.argv:
command_line = command_line + ' ' + item
if meta_append is None:
meta = {
'site_id': None,
'data_level': 'sgp',
'comment': 'This is highly experimental and initial data. '
+ 'There are many known and unknown issues. Please do '
+ 'not use before contacting the Translator responsible '
+ '<EMAIL>',
'attributions': 'This data is collected by the ARM Climate Research '
+ 'facility. Radar system is operated by the radar '
+ 'engineering team <EMAIL> and the data is '
+ 'processed by the precipitation radar products '
+ 'team. LP code courtesy of <NAME>, BNL.',
'version': '2.0 lite',
'vap_name': 'cmac',
'known_issues': 'False phidp jumps in insect regions. Still uses '
+ 'old Giangrande code.',
'developers': '<NAME>, ANL. <NAME>, ANL.',
'translator': '<NAME>, ANL.',
'mentors': '<NAME>, PNNL., <NAME>, PNNL.',
'Conventions': 'CF/Radial instrument_parameters ARM-1.3'}
else:
if meta_append.lower().endswith('.json'):
with open(meta_append, 'r') as infile:
meta = json.load(infile)
elif meta_append == 'config':
meta = meta_config
else:
raise RuntimeError('Must provide the file name of the json file',
'or say config to use the meta data from',
'config.py')
radar.metadata.clear()
radar.metadata.update(meta)
radar.metadata['command_line'] = command_line
return radar
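# Example usage (illustrative sketch only; the file names and config name below are hypothetical):
#
#     import netCDF4
#     import pyart
#     radar_obj = pyart.io.read('example_radar_file.nc')
#     sonde_obj = netCDF4.Dataset('example_sonde_file.nc')
#     cmac_radar = cmac(radar_obj, sonde_obj, 'example_config', verbose=True)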
def area_coverage(radar, precip_threshold=10.0, convection_threshold=40.0):
""" Returns percent coverage of precipitation and convection. """
temp_radar = radar.extract_sweeps([0])
ref = temp_radar.fields['corrected_reflectivity']['data']
total_len = len(ref.flatten())
ref_10_len = len(np.argwhere(ref >= precip_threshold))
    ref_40_len = len(np.argwhere(ref >= convection_threshold))
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import csv
import json
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
from six import raise_from  # needed by the CSV parsing helpers below
from pycocotools.coco import COCO  # needed by CocoDataset
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
class CocoDataset(Dataset):
"""Coco dataset."""
def __init__(self, root_dir, set_name='train2017', transform=None):
"""
Args:
root_dir (string): COCO directory.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.root_dir = root_dir
self.set_name = set_name
self.transform = transform
self.coco = COCO(os.path.join(self.root_dir, 'annotations', 'instances_' + self.set_name + '.json'))
self.image_ids = self.coco.getImgIds()
self.load_classes()
def load_classes(self):
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes)
self.classes[c['name']] = len(self.classes)
# also load the reverse (label -> name)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
img = self.load_image(idx)
annot = self.load_annotations(idx)
sample = {'img': img, 'annot': annot}
if self.transform:
sample = self.transform(sample)
return sample
def load_image(self, image_index):
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
path = os.path.join(self.root_dir, 'images', self.set_name, image_info['file_name'])
img = skimage.io.imread(path)
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32)/255.0
def load_annotations(self, image_index):
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
return annotations
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for idx, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, :4] = a['bbox']
annotation[0, 4] = self.coco_label_to_label(a['category_id'])
annotations = np.append(annotations, annotation, axis=0)
# transform from [x, y, w, h] to [x1, y1, x2, y2]
annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
return annotations
def coco_label_to_label(self, coco_label):
return self.coco_labels_inverse[coco_label]
def label_to_coco_label(self, label):
return self.coco_labels[label]
def image_aspect_ratio(self, image_index):
image = self.coco.loadImgs(self.image_ids[image_index])[0]
return float(image['width']) / float(image['height'])
def num_classes(self):
return 80
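# Example usage (illustrative sketch only; 'path/to/coco' is a placeholder directory laid out as
# <root>/annotations/instances_<set_name>.json and <root>/images/<set_name>/):
#
#     dataset = CocoDataset('path/to/coco', set_name='train2017')
#     sample = dataset[0]  # {'img': HxWx3 float array in [0, 1], 'annot': Nx5 [x1, y1, x2, y2, label]}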
class CSVDataset(Dataset):
"""CSV dataset."""
def __init__(self, train_file, class_list,img_dir, transform=None):
"""
Args:
train_file (string): CSV file with training annotations
annotations (string): CSV file with class list
test_file (string, optional): CSV file with testing annotations
"""
self.train_file = train_file
self.class_list = class_list
self.transform = transform
self.img_dir = img_dir
# parse the provided class file
try:
with self._open_for_csv(self.class_list) as file:
self.classes = self.load_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e)), None)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, class_name
try:
with self._open_for_csv(self.train_file) as file:
self.image_data = self._read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(self.train_file, e)), None)
self.image_names = list(self.image_data.keys())
def _parse(self, value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _open_for_csv(self, path):
"""
Open a file with flags suitable for csv.reader.
For Python 2 this means opening the file in mode 'rb';
for Python 3 it means mode 'r' with universal newlines.
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_classes(self, csv_reader):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def __len__(self):
return len(self.image_names)
def __getitem__(self, idx):
img = self.load_image(idx)
annot = self.load_annotations(idx)
sample = {'img_fname':self.image_names[idx],'img': img, 'annot': annot}
if self.transform:
sample = self.transform(sample)
return sample
def load_image(self, image_index):
img = skimage.io.imread(self.image_names[image_index])
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32)/255.0
def load_annotations(self, image_index):
# get ground truth annotations
annotation_list = self.image_data[self.image_names[image_index]]
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotation_list) == 0:
return annotations
# parse annotations
for idx, a in enumerate(annotation_list):
# some annotations have basically no width / height, skip them
x1 = a['x1']
x2 = a['x2']
y1 = a['y1']
y2 = a['y2']
if (x2-x1) < 1 or (y2-y1) < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, 0] = x1
annotation[0, 1] = y1
annotation[0, 2] = x2
annotation[0, 3] = y2
annotation[0, 4] = self.name_to_label(a['class'])
annotations = np.append(annotations, annotation, axis=0)
return annotations
def _read_annotations(self, csv_reader, classes):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
img_file, x1, y1, x2, y2, class_name = row[:6]
except ValueError:
raise_from(ValueError('line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)), None)
img_file = self.img_dir+img_file
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = self._parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = self._parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = self._parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = self._parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
# Check that the bounding box is valid.
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if class_name not in classes:
raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
return result
def name_to_label(self, name):
return self.classes[name]
def label_to_name(self, label):
return self.labels[label]
def num_classes(self):
return max(self.classes.values()) + 1
def image_aspect_ratio(self, image_index):
image = Image.open(self.image_names[image_index])
return float(image.width) / float(image.height)
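# A hedged usage sketch (the file and directory names below are hypothetical, not from the
# original source). The annotations CSV is expected to hold rows of the form
#     images/img_001.jpg,10,20,110,220,person
# (or 'images/img_001.jpg,,,,,' for an image without boxes), and the class-list CSV rows of
# the form 'person,0'. Then:
#     dataset = CSVDataset(train_file='annotations.csv', class_list='classes.csv', img_dir='data/')
#     sample = dataset[0]   # {'img_fname': ..., 'img': HxWx3 float32 array, 'annot': Nx5 array}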
class CustomDataset(Dataset):
"""Img folder with .json for each img"""
def __init__(self, img_list,class_list, transform=None):
"""
Args:
img_list (list): list of image file paths; each image is expected to have a matching
    .json annotation file alongside it
class_list (string): CSV file with the class list
transform (callable, optional): Optional transform to be applied on a sample
"""
self.class_list = class_list
self.transform = transform
self.img_list = img_list
# parse the provided class file
try:
with self._open_for_csv(self.class_list) as file:
self.classes = self.load_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e)), None)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
try:
self.image_data = self._read_annotations(self.img_list, self.classes)
except ValueError as e:
raise_from(ValueError('invalid annotations for image list: {}'.format(e)), None)
self.image_names = list(self.image_data.keys())
def _parse(self, value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _open_for_csv(self, path):
"""
Open a file with flags suitable for csv.reader.
For Python 2 this means opening the file in mode 'rb';
for Python 3 it means mode 'r' with universal newlines.
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_classes(self, csv_reader):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def __len__(self):
return len(self.image_names)
def __getitem__(self, idx):
img = self.load_image(idx)
annot = self.load_annotations(idx)
sample = {'img_fname':self.image_names[idx],'img': img, 'annot': annot}
if self.transform:
sample = self.transform(sample)
return sample
def load_image(self, image_index):
img = skimage.io.imread(self.image_names[image_index])
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32)/255.0
def load_annotations(self, image_index):
# get ground truth annotations
annotation_list = self.image_data[self.image_names[image_index]]
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotation_list) == 0:
return annotations
# parse annotations
for idx, a in enumerate(annotation_list):
# some annotations have basically no width / height, skip them
x1 = a['x1']
x2 = a['x2']
y1 = a['y1']
y2 = a['y2']
if (x2-x1) < 1 or (y2-y1) < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, 0] = x1
annotation[0, 1] = y1
annotation[0, 2] = x2
annotation[0, 3] = y2
annotation[0, 4] = self.name_to_label(a['class'])
annotations = np.append(annotations, annotation, axis=0)
return annotations
def _read_annotations(self, img_list, classes):
result = {}
for idx,img_fname in enumerate(img_list):
annotations_fname = img_fname[:-3]+'json'
if img_fname not in result:
result[img_fname] = []
try:
with open(annotations_fname) as f:
annotations = json.load(f)
#img_file, x1, y1, x2, y2, class_name = row[:6]
except:
continue
bboxes = annotations['bboxes']
if len(bboxes)==0:
continue
for i,bbox in enumerate(bboxes):
x1, y1, x2, y2, class_name = bbox[0], bbox[1], bbox[2], bbox[3], annotations['class_name'][i]
# x1 = self._parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
# y1 = self._parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
# x2 = self._parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
# y2 = self._parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
# Check that the bounding box is valid.
if x2 <= x1:
raise ValueError('bbox {}: x2 ({}) must be higher than x1 ({})'.format(i, x2, x1))
if y2 <= y1:
raise ValueError('bbox {}: y2 ({}) must be higher than y1 ({})'.format(i, y2, y1))
# check if the current class name is correctly present
if class_name not in classes:
raise ValueError('bbox {}: unknown class name: \'{}\' (classes: {})'.format(i, class_name, classes))
result[img_fname].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
return result
def name_to_label(self, name):
return self.classes[name]
def label_to_name(self, label):
return self.labels[label]
def num_classes(self):
return max(self.classes.values()) + 1
def image_aspect_ratio(self, image_index):
image = Image.open(self.image_names[image_index])
return float(image.width) / float(image.height)
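# A hedged illustration (an addition, not from the original source) of the per-image JSON
# sidecar this class expects, as read by _read_annotations above; the file names are
# hypothetical. For an image 'scene_001.jpg' the loader opens 'scene_001.json' containing
#     {"bboxes": [[10, 20, 110, 220]], "class_name": ["person"]}
# and a dataset is then built with, e.g.:
#     dataset = CustomDataset(img_list=['scene_001.jpg'], class_list='classes.csv')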
def collater(data):
img_fnames = [s['img_fname'] for s in data]
imgs = [s['img'] for s in data]
annots = [s['annot'] for s in data]
scales = [s['scale'] for s in data]
widths = [int(s.shape[0]) for s in imgs]
heights = [int(s.shape[1]) for s in imgs]
batch_size = len(imgs)
max_width = np.array(widths).max()
max_height = np.array(heights).max()
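    # NOTE: the remainder of this collate function is missing in the source. The lines below are
    # a minimal sketch (an assumption, not the original implementation): pad every image to the
    # largest width/height in the batch and pad the annotations to the largest box count with -1.
    padded_imgs = np.zeros((batch_size, max_width, max_height, 3), dtype=np.float32)
    for i in range(batch_size):
        img = imgs[i]
        padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img
    max_num_annots = max(annot.shape[0] for annot in annots)
    annot_padded = np.ones((batch_size, max(max_num_annots, 1), 5), dtype=np.float32) * -1
    for i, annot in enumerate(annots):
        if annot.shape[0] > 0:
            annot_padded[i, :annot.shape[0], :] = annot
    return {'img_fname': img_fnames, 'img': padded_imgs, 'annot': annot_padded, 'scale': scales}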
# plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# numpy
import numpy as np
# scipy
import scipy as sp
import scipy.interpolate
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
import scipy.fftpack
import scipy.sparse
# jit
from numba import jit
import ctypes
import astropy
import astropy as ap
from astropy.convolution import convolve_fft, AiryDisk2DKernel
import pickle
# multiprocessing
import multiprocessing as mp
from copy import deepcopy
# utilities
import os, time, sys, glob, fnmatch, inspect, traceback, functools
# HealPix
import healpy as hp
# ignore warnings if not in diagnostic mode
import warnings
#seterr(divide='raise', over='raise', invalid='raise')
#seterr(all='raise')
#seterr(under='ignore')
#warnings.simplefilter('ignore')
#np.set_printoptions(linewidth=180)
#sns.set(context='poster', style='ticks', color_codes=True)
import h5py
# utilities
# secondaries
## Symbolic Jacobian calculation
#import sympy
# tdpy
import tdpy
from tdpy.util import summgene
# photometry related
### find the spectra of sources
def retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):
if gdat.numbener == 1:
spec = flux[None, :]
else:
if plot:
meanener = gdat.meanpara.enerplot
else:
meanener = gdat.meanpara.ener
if spectype == 'gaus':
spec = 1. / edis[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis[None, :])**2)
if spectype == 'voig':
args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]
spec = 1. / sigm[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.real(scipy.special.wofz(args))
if spectype == 'edis':
edis = edisintp(elin)[None, :]
spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
if spectype == 'pvoi':
spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
if spectype == 'lore':
spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
if spectype == 'powr':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])
if spectype == 'colr':
if plot:
spec = np.zeros((gdat.numbenerplot, flux.size))
else:
spec = np.empty((gdat.numbener, flux.size))
for i in gdat.indxener:
if i < gdat.indxenerpivt:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])
elif i == gdat.indxenerpivt:
spec[i, :] = flux
else:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])
if spectype == 'curv':
spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])
if spectype == 'expc':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])
return spec
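# A minimal standalone check (an addition, not part of the original source) of the power-law
# branch above, spec = flux * (E / E_pivot)**(-sind), evaluated without the gdat machinery.
# The energies and pivot are made up for illustration.
def _chck_spec_powr():
    flux = np.array([1e-9])
    sind = np.array([2.])
    ener = np.array([0.5, 1., 2.])   # hypothetical energies, pivot at 1
    spec = flux[None, :] * (ener / 1.)[:, None]**(-sind[None, :])
    assert np.allclose(spec[:, 0], [4e-9, 1e-9, 2.5e-10])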
### find the surface brightness due to one point source
def retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):
# calculate the distance to all pixels from each point source
dist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)
# interpolate the PSF onto the pixels
if gdat.kernevaltype == 'ulip':
psfntemp = psfnintp(dist)
if gdat.kernevaltype == 'bspx':
pass
# scale by the PS spectrum
sbrtpnts = spec[:, None, None] * psfntemp
return sbrtpnts
def retr_psfnwdth(gdat, psfn, frac):
'''
Return the PSF width
'''
wdth = np.zeros((gdat.numbener, gdat.numbevtt))
for i in gdat.indxener:
for m in gdat.indxevtt:
psfntemp = psfn[i, :, m]
indxanglgood = np.argsort(psfntemp)
intpwdth = max(frac * np.amax(psfntemp), np.amin(psfntemp))
if intpwdth >= np.amin(psfntemp[indxanglgood]) and intpwdth <= np.amax(psfntemp[indxanglgood]):
wdthtemp = sp.interpolate.interp1d(psfntemp[indxanglgood], gdat.binspara.angl[indxanglgood], fill_value='extrapolate')(intpwdth)
else:
wdthtemp = 0.
wdth[i, m] = wdthtemp
return wdth
# lensing-related
def samp_lgalbgalfromtmpl(gdat, probtmpl):
indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)
lgal = gdat.lgalgrid[indxpixldraw] + randn(gdat.sizepixl)
bgal = gdat.bgalgrid[indxpixldraw] + randn(gdat.sizepixl)
return lgal, bgal
## custom random variables, pdfs, cdfs and icdfs
### probability distribution functions
def retr_lprbpois(data, modl):
lprb = data * np.log(modl) - modl - sp.special.gammaln(data + 1)
return lprb
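# A quick self-check (an addition, not from the original source): the expression above matches
# scipy's Poisson log-PMF, using the 'pss' alias imported at the top of this module.
def _chck_lprbpois():
    data = np.array([0, 3, 7])
    modl = np.array([1., 2.5, 6.])
    assert np.allclose(retr_lprbpois(data, modl), pss.logpmf(data, modl))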
### probability density functions
def pdfn_self(xdat, minm, maxm):
pdfn = 1. / (maxm - minm)
return pdfn
def pdfn_expo(xdat, maxm, scal):
if (xdat > maxm).any():
pdfn = 0.
else:
pdfn = 1. / scal / (1. - np.exp(-maxm / scal)) * np.exp(-xdat / scal)
return pdfn
def pdfn_dexp(xdat, maxm, scal):
pdfn = 0.5 * pdfn_expo(np.fabs(xdat), maxm, scal)
return pdfn
def pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):
if np.isscalar(xdat):
xdat = np.array([xdat])
faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \
(1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))
facb = faca * brek**(sloplowr - slopuppr) / (1. - sloplowr)
pdfn = np.empty_like(xdat)
indxlowr = np.where(xdat <= brek)[0]
indxuppr = np.where(xdat > brek)[0]
if indxlowr.size > 0:
pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)
if indxuppr.size > 0:
pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)
return pdfn
def pdfn_powr(xdat, minm, maxm, slop):
norm = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))
pdfn = norm * xdat**(-slop)
return pdfn
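# A numerical sanity check (an addition, not part of the original source): the power-law
# density above should integrate to approximately 1 over [minm, maxm].
def _chck_pdfn_powr():
    minm, maxm, slop = 1., 100., 1.9
    xdat = np.linspace(minm, maxm, 200001)
    pdfn = pdfn_powr(xdat, minm, maxm, slop)
    # composite trapezoid rule, written out to stay compatible across numpy versions
    intg = np.sum(0.5 * (pdfn[1:] + pdfn[:-1]) * np.diff(xdat))
    assert np.isclose(intg, 1., atol=1e-4)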
def pdfn_logt(xdat, minm, maxm):
pdfn = 1. / (np.log(maxm) - np.log(minm)) / xdat
return pdfn
def pdfn_igam(xdat, slop, cutf):
pdfn = sp.stats.invgamma.pdf(xdat, slop - 1., scale=cutf)
return pdfn
def pdfn_lnor(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_gaus(xdat, mean, stdv):
pdfn = 1. / np.sqrt(2. * np.pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)
return pdfn
def pdfn_lgau(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_atan(para, minmpara, maxmpara):
pdfn = 1. / (para**2 + 1.) / (np.arctan(maxmpara) - np.arctan(minmpara))
return pdfn
def cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):
gmod = getattr(gdat, strgmodl)
scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]
if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':
listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]
factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'self':
paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'logt':
paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'atan':
gmod.listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, gmod.listmaxmparagenrscalbase)
elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':
gmod.listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]
gmod.liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'eerr':
gmod.cdfnlistminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]
gmod.listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase, \
gmod.cdfnlistminmparagenrscalbaseunit, gmod.listparagenrscalbaseunitdiff)
else:
paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase)
elif scalparagenrbase == 'pois':
paragenrscalbaseunit = paragenrscalbase
if gdat.booldiagmode:
if paragenrscalbaseunit == 0:
print('Warning. CDF is zero.')
return paragenrscalbaseunit
def icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):
gmod = getattr(gdat, strgmodl)
# tobechanged
# temp -- change zeros to empty
paragenrscalfull = np.zeros_like(paragenrunitfull)
for scaltype in gdat.listscaltype:
listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]
if len(listindxparagenrbasescal) == 0:
continue
paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
if indxparagenrfullelem is not None:
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]
if indxparagenrfulltemp.size == 0:
continue
paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \
gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)
if gdat.booldiagmode:
if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():
raise Exception('')
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
return paragenrscalfull
def icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):
gmod = getattr(gdat, strgmodl)
if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':
minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]
factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]
if scaltype == 'self':
paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'logt':
paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'atan':
listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)
elif scaltype == 'gaus' or scaltype == 'eerr':
listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]
liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]
if scaltype == 'eerr':
cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]
listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
else:
paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scaltype == 'pois':
paragenrscalbase = paragenrunitbase
if gdat.booldiagmode:
if not np.isfinite(paragenrscalbase).all():
print('scaltype')
print(scaltype)
print('paragenrscalbase')
print(paragenrscalbase)
print('type(paragenrscalbase)')
print(type(paragenrscalbase))
print('paragenrscalbase.dtype')
print(paragenrscalbase.dtype)
raise Exception('')
return paragenrscalbase
def icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):
gmod = getattr(gdat, strgmodl)
if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':
minm = getattr(gmod.minmpara, nameparagenrelem)
if scalcomp != 'self':
maxm = getattr(gmod.maxmpara, nameparagenrelem)
if scalcomp == 'powr':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]
if gdat.booldiagmode:
if not np.isfinite(slop):
raise Exception('')
if maxm < minm:
raise Exception('')
icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)
if scalcomp == 'dpowslopbrek':
distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)
if scalcomp == 'expo':
sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)
icdf = tdpy.icdf_expo(cdfn, maxm, sexp)
if scalcomp == 'self':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_self_fact(cdfn, minm, fact)
if scalcomp == 'logt':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_logt(cdfn, minm, fact)
if scalcomp == 'dexp':
scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
icdf = tdpy.icdf_dexp(cdfn, maxm, scal)
if scalcomp == 'lnormeanstdv':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)
if scalcomp == 'igam':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
icdf = tdpy.icdf_igam(cdfn, slop, cutf)
if scalcomp == 'gaus':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)
if gdat.booldiagmode:
if not np.isfinite(icdf).all():
print('icdf')
print(icdf)
raise Exception('')
return icdf
def cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmod.listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]
cdfn = np.empty_like(icdf)
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):
if gmod.listscalparagenrelem[k] == 'self' or gmod.listscalparagenrelem[k] == 'dexp' or gmod.listscalparagenrelem[k] == 'expo' \
or gmod.listscalparagenrelem[k] == 'powr' or gmod.listscalparagenrelem[k] == 'dpowslopbrek':
minm = getattr(gdat.fitt.minm, nameparagenrelem)
if gmod.listscalparagenrelem[k] == 'powr':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)
elif gmod.listscalparagenrelem[k] == 'dpowslopbrek':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]
sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]
slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]
cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)
else:
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
cdfn[k] = cdfn_self(icdf[k], minm, fact)
if gmod.listscalparagenrelem[k] == 'lnormeanstdv':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_lnor(icdf[k], distmean, diststdv)
if gmod.listscalparagenrelem[k] == 'igam':
slop = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
cdfn[k] = cdfn_igam(icdf[k], slop, cutf)
if gmod.listscalparagenrelem[k] == 'gaus':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)
return cdfn
### update sampler state
def updt_stat(gdat, gdatmodi):
if gdat.typeverb > 1:
print('updt_stat()')
# update the sample and the unit sample vectors
gdatmodi.this.lpritotl = gdatmodi.next.lpritotl
gdatmodi.this.lliktotl = gdatmodi.next.lliktotl
gdatmodi.this.lpostotl = gdatmodi.next.lpostotl
gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])
gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])
if gdatmodi.this.indxproptype > 0:
gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)
gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')
def initcompfromstat(gdat, gdatmodi, namerefr):
gmod = gdat.fitt
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
minm = getattr(gdat.fitt.minmpara, nameparagenrelem)
maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
try:
comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]
if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
if gmod.listscalparagenrelem[l][g] == 'self':
compunit = cdfn_self(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'logt':
compunit = cdfn_logt(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'expo':
scal = getattr(gdat.fitt, 'gangdistsexp')
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
compunit = cdfn_expo(comp, maxm, scal)
if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':
slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]
if gmod.listscalparagenrelem[l][g] == 'powr':
compunit = cdfn_powr(comp, minm, maxm, slop)
if gmod.listscalparagenrelem[l][g] == 'igam':
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
compunit = cdfn_igam(comp, slop, cutf)
if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':
brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]
sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]
slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]
compunit = cdfn_dpow(comp, minm, maxm, brek, sloplowr, slopuppr)
if gmod.listscalparagenrelem[l][g] == 'gaus':
distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]
diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]
compunit = cdfn_gaus(comp, distmean, diststdv)
except:
if gdat.typeverb > 0:
print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)
compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))
gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit
### find the set of pixels in proximity to a position on the map
def retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):
gmod = getattr(gdat, strgmodl)
lgal = dictelem[l]['lgal']
bgal = dictelem[l]['bgal']
varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]
if gmod.typeelemspateval[l] == 'locl':
listindxpixlelem = [[] for k in range(lgal.size)]
for k in range(lgal.size):
indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])
indxfluxproxtemp = np.digitize(varbampl[k], gdat.binspara.prox)
if indxfluxproxtemp > 0:
indxfluxproxtemp -= 1
if indxfluxproxtemp == gdat.binspara.prox.size - 1:
print('Warning! Index of the proximity pixel list overflowed. Taking the largest list...')
indxfluxproxtemp -= 1
indxpixlelem = gdat.indxpixlprox[indxfluxproxtemp][indxpixlpnts]
if isinstance(indxpixlelem, int):
indxpixlelem = gdat.indxpixl
listindxpixlelem[k] = indxpixlelem
listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))
else:
listindxpixlelemconc = gdat.indxpixl
listindxpixlelem = gdat.indxpixl
return listindxpixlelem, listindxpixlelemconc
### find the distance between two points on the map
def retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):
if gdat.typepixl == 'heal':
xdat, ydat, zaxi = retr_unit(lgal, bgal)
anglcosi = gdat.xdatgrid[indxpixlelem] * xdat + gdat.ydatgrid[indxpixlelem] * ydat + gdat.zaxigrid[indxpixlelem] * zaxi
if retranglcosi:
return anglcosi
else:
angldist = np.arccos(anglcosi)
return angldist
else:
angldist = np.sqrt((lgal - gdat.lgalgrid[indxpixlelem])**2 + (bgal - gdat.bgalgrid[indxpixlelem])**2)
return angldist
### find the pixel index of a point on the map
def retr_indxpixl(gdat, bgal, lgal):
if gdat.typepixl == 'heal':
indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]
if gdat.booldiagmode:
if (indxpixl == -1).any():
raise Exception('pixlcnvt went negative!')
if gdat.typepixl == 'cart':
indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)
indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. / gdat.maxmgangdata).astype(int)
if np.isscalar(indxlgcr):
if indxlgcr < 0:
indxlgcr = 0
if indxlgcr >= gdat.numbsidecart:
indxlgcr = gdat.numbsidecart - 1
else:
indxlgcr[np.where(indxlgcr < 0)] = 0
indxlgcr[np.where(indxlgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
if np.isscalar(indxbgcr):
if indxbgcr < 0:
indxbgcr = 0
if indxbgcr >= gdat.numbsidecart:
indxbgcr = gdat.numbsidecart - 1
else:
indxbgcr[np.where(indxbgcr < 0)] = 0
indxbgcr[np.where(indxbgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr
# convert to an index of non-zero exposure pixels
#indxpixl = gdat.indxpixlroficnvt[indxpixl]
return indxpixl
## obtain count maps
def retr_cntp(gdat, sbrt):
cntp = sbrt * gdat.expo * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
return cntp
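# Illustrative unit bookkeeping for the count map above (the numbers are made up, not from the
# original source): a surface brightness of 1e-7 ph / (cm^2 s sr GeV), an exposure of 1e11 cm^2 s,
# a pixel solid angle of 1e-6 sr and an energy bin width of 1 GeV give
# 1e-7 * 1e11 * 1e-6 * 1. = 1e-2 expected counts per pixel per energy bin.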
## plotting
### construct path for plots
def retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):
if strgmodl == 'true' or strgstat == '':
path = gdat.pathinit + nameinte + strgplot + '.pdf'
elif strgstat == 'pdfn' or strgstat == 'mlik':
path = gdat.pathplotrtag + strgpdfn + '/finl/' + nameinte + strgstat + strgplot + '.pdf'
elif strgstat == 'this':
path = gdat.pathplotrtag + strgpdfn + '/fram/' + nameinte + strgstat + strgplot + '_swep%09d.pdf' % gdatmodi.cntrswep
return path
### determine the marker size
def retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):
gmod = getattr(gdat, strgmodl)
minm = getattr(gdat.minmpara, nameparagenrelemampl)
maxm = getattr(gdat.maxmpara, nameparagenrelemampl)
mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize
return mrkrsize
## experiment specific
def retr_psfphubb(gmod):
# temp
gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact
def retr_psfpchan(gmod):
# temp
#gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact
if gdat.numbenerfull == 5:
gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])
if gdat.numbenerfull == 2:
gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])
#gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()]
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact,
# 0.30 / gdat.anglfacti\
# 0.40 / gdat.anglfacti\
# 0.60 / gdat.anglfacti\
# 0.70 / gdat.anglfacti
#gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \
# 0.30 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.40 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.60 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.70 / gdat.anglfact, 1.0e-1, 2.0])
def retr_psfpsdyn(gmod):
gmod.psfpexpr = np.array([0.05])
def retr_psfpferm(gmod):
if gdat.anlytype.startswith('rec8'):
path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'
else:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
irfn = astropy.io.fits.getdata(path, 1)
minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]
maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]
enerirfn = np.sqrt(minmener * maxmener)
numbpsfpscal = 3
numbpsfpform = 5
fermscal = np.zeros((gdat.numbevtt, numbpsfpscal))
fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))
strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']
for m in gdat.indxevtt:
if gdat.anlytype.startswith('rec8'):
irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])
fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']
else:
if m == 1:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'
elif m == 0:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
else:
continue
irfn = astropy.io.fits.getdata(path, 1)
fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']
for k in range(numbpsfpform):
fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)
# convert N_tail to f_core
for m in gdat.indxevtt:
for i in gdat.indxener:
fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)
# calculate the scale factor
gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)
# store the fermi PSF parameters
gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)
for m in gdat.indxevtt:
for k in range(numbpsfpform):
indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k
gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]
def retr_refrchaninit(gdat):
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.dictrefr = []
for q in gdat.indxrefr:
gdat.dictrefr.append(dict())
gdat.refr.namepara.elemsign = ['flux', 'magt']
gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']
gdat.listnamerefr += ['xu11', 'wo08']
setattr(gdat, 'plotminmotyp', 0.)
setattr(gdat, 'plotmaxmotyp', 1.)
setattr(gmod.lablrootpara, 'otyp', 'O')
setattr(gdat, 'scalotypplot', 'self')
setattr(gmod.lablrootpara, 'otypxu11', 'O')
for name in gdat.listnamerefr:
setattr(gdat, 'plotminmotyp' + name, 0.)
setattr(gdat, 'plotmaxmotyp' + name, 1.)
if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':
with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:
for k, line in enumerate(thisfile):
if k < 18:
continue
rasccand = line[2]
declcand = line[2]
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']
def retr_refrchanfinl(gdat):
booltemp = False
if gdat.anlytype.startswith('extr'):
if gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] = 1490
gdat.numbpixlbgalshft[0] = 1430
else:
booltemp = True
elif gdat.anlytype.startswith('home'):
gdat.numbpixllgalshft[0] = 0
gdat.numbpixlbgalshft[0] = 0
if gdat.numbsidecart == 600:
pass
elif gdat.numbsidecart == 100:
indxtile = int(gdat.anlytype[-4:])
numbsidecntr = int(gdat.anlytype[8:12])
numbtileside = numbsidecntr // gdat.numbsidecart
indxtilexaxi = indxtile // numbtileside
indxtileyaxi = indxtile % numbtileside
gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart
gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart
elif gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] += 150
gdat.numbpixlbgalshft[0] += 150
else:
booltemp = True
else:
booltemp = True
if booltemp:
raise Exception('Reference elements cannot be aligned with the spatial axes!')
## WCS object for rotating reference elements into the ROI
if gdat.numbener == 2:
gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'
else:
gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]
# Xue et al. (2011)
#with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:
pathfile = gdat.pathinpt + 'Xue2011.fits'
hdun = astropy.io.fits.open(pathfile)
hdun.info()
lgalchan = hdun[1].data['_Glon'] / 180. * np.pi
bgalchan = hdun[1].data['_Glat'] / 180. * np.pi
fluxchansoft = hdun[1].data['SFlux']
fluxchanhard = hdun[1].data['HFlux']
objttypechan = hdun[1].data['Otype']
gdat.refrlumi[0][0] = hdun[1].data['Lx']
# position
gdat.refr.dictelem[0]['lgal'] = lgalchan
gdat.refr.dictelem[0]['bgal'] = bgalchan
# spectra
gdat.refrspec = [np.zeros((3, gdat.numbener, lgalchan.size))]
if gdat.numbener == 2:
gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9
gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.
else:
gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]
# fluxes
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
# spectral indices
if gdat.numbener > 1:
gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))
## object type
objttypechantemp = np.zeros(lgalchan.size) - 1.
indx = np.where(objttypechan == 'AGN')[0]
objttypechantemp[indx] = 0.165
indx = np.where(objttypechan == 'Galaxy')[0]
objttypechantemp[indx] = 0.495
indx = np.where(objttypechan == 'Star')[0]
objttypechantemp[indx] = 0.835
gdat.refrotyp[0][0] = objttypechantemp
# Wolf et al. (2011)
path = gdat.pathdata + 'inpt/Wolf2008.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['_Glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['_Glat'])
gdat.refrmagt[1][0] = data['Rmag']
gdat.refrreds[1][0] = data['MCz']
#listname = []
#for k in range(data['MCclass'].size):
# if not data['MCclass'][k] in listname:
# listname.append(data['MCclass'][k])
listname = ['Galaxy', 'Galaxy (Uncl!)', 'QSO (Gal?)', 'Galaxy (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']
gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1.
for k, name in enumerate(listname):
indx = np.where(data['MCclass'] == name)[0]
gdat.refrotyp[1][0][indx] = k / 10.
# error budget
for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_refrferminit(gdat):
gdat.listnamerefr += ['ac15', 'ma05']
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']
gdat.refr.namepara.elemsign = ['flux', 'flux0400']
setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)
setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')
for name in gdat.listnamerefr:
setattr(gdat.minmpara, 'curv' + name, -1.)
setattr(gdat.maxmpara, 'curv' + name, 1.)
setattr(gdat.minmpara, 'expc' + name, 0.1)
setattr(gdat.maxmpara, 'expc' + name, 10.)
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']
def retr_refrfermfinl(gdat):
gdat.minmstyp = -0.5
gdat.maxmstyp = 3.5
gdat.lablstyp = 'S'
gmod.scalstypplot = 'self'
gdat.minmtvar = 0.
gdat.maxmtvar = 400.
gdat.labltvar = 'T'
gmod.scaltvarplot = 'logt'
# Acero+2015
path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'
fgl3 = astropy.io.fits.getdata(path)
gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])
gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))
gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])
gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size
gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]
gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]
fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \
fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None]
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]
gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)
gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)
fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5
fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]
fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))
fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))
gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)
for k in range(gdat.refr.dictelem[0]['lgal'].size):
gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])
gdat.refrtvar[0] = fgl3['Variability_Index']
gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw ')] = 0
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola ')] = 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff ')] = 2
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3
indx = np.where(gdat.refrstyp[0] == -1)[0]
if indx.size > 0:
raise Exception('')
gdat.refrsind[0] = fgl3['Spectral_Index']
gdat.refrcurv[0] = fgl3['beta']
gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3
gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.
gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.
gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1))
gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1))
gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1))
# Manchester+2005
path = gdat.pathdata + 'inpt/Manchester2005.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['glat'])
gdat.refrper0[1] = data['P0']
gdat.refrper1[1] = data['P1']
gdat.refrflux0400[1] = data['S400']
#gdat.refrdism[1] = data['DM']
#gdat.refrdlos[1] = data['Dist']
# error budget
for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_singgaus(scaldevi, sigc):
psfn = 1. / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
return psfn
def retr_singking(scaldevi, sigc, gamc):
psfn = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
return psfn
def retr_doubgaus(scaldevi, frac, sigc, sigt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * np.exp(-0.5 * scaldevi**2 / sigt**2)
return psfn
def retr_gausking(scaldevi, frac, sigc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
def retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc) + \
(1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
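# A numerical check (an addition, not from the original source) that the single-Gaussian PSF
# above is normalized on the plane: the integral of 2*pi*r*psf(r) over r >= 0 equals 1.
def _chck_singgaus():
    sigc = 0.1
    radi = np.linspace(0., 10. * sigc, 20001)
    grnd = 2. * np.pi * radi * retr_singgaus(radi, sigc)
    intg = np.sum(0.5 * (grnd[1:] + grnd[:-1]) * np.diff(radi))
    assert np.isclose(intg, 1., atol=1e-4)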
def retr_lgalbgal(gang, aang):
lgal = gang * np.cos(aang)
bgal = gang * np.sin(aang)
return lgal, bgal
def retr_gang(lgal, bgal):
gang = np.arccos(np.cos(lgal) * np.cos(bgal))
return gang
def retr_aang(lgal, bgal):
aang = np.arctan2(bgal, lgal)
return aang
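# A small round-trip check (an addition, not from the original source): for small angular
# distances the gang/aang <-> lgal/bgal conversions above invert each other, the position angle
# exactly and the angular distance to within the flat-sky approximation.
def _chck_gangaang():
    gang, aang = 0.01, 0.7
    lgal, bgal = retr_lgalbgal(gang, aang)
    assert np.isclose(retr_gang(lgal, bgal), gang, atol=1e-5)
    assert np.isclose(retr_aang(lgal, bgal), aang)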
def show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
print('strgmodl: ' + strgmodl)
print('strgstat: ' + strgstat)
print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))
for k in gmod.indxparagenrfull:
if indxsampshow is not None and not k in indxsampshow:
continue
if gmod.numbparaelem > 0:
booltemp = False
for l in gmod.indxpopl:
if k == gmod.indxparagenrelemsing[l][0]:
booltemp = True
if booltemp:
print('')
print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))
def prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):
if gdat.typeverb > 1:
print('prop_stat()')
#indxproptype
# within, birth, death, split, merge
# 0, 1, 2, 3, 4
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodthis = getattr(gdatobjt, 'this')
gmodnext = getattr(gdatobjt, 'next')
if gmod.numbparaelem > 0:
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):
raise Exception('Repeating entry in the element index list!')
thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)
setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)
else:
thisindxparagenrfullelem = None
gdatmodi.this.boolpropfilt = True
# index of the population in which a transdimensional proposal will be attempted
if gmod.numbparaelem > 0:
if thisindxpopl is None:
gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)
else:
gdatmodi.indxpopltran = thisindxpopl
numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# forced death or birth does not check for the prior on the dimensionality on purpose!
if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \
not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if brth or deth or np.random.rand() < gdat.probbrde or \
numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:
## births and deaths
if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:
gdatmodi.this.indxproptype = 2
elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:
gdatmodi.this.indxproptype = 1
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 1
else:
gdatmodi.this.indxproptype = 2
else:
## splits and merges
if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:
gdatmodi.this.indxproptype = 3
elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:
gdatmodi.this.indxproptype = 4
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 3
else:
gdatmodi.this.indxproptype = 4
else:
if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():
raise Exception('')
thisindxparagenrfullelemconc = []
for l in gmod.indxpopl:
thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])
# get the indices of the current parameter vector
if gmod.numbparaelem > 0:
thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)
else:
thisindxsampfull = gmod.indxparagenrbasestdv
thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]
if not np.isfinite(thisstdp).all():
raise Exception('')
gdatmodi.this.indxproptype = 0
if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:
raise Exception('')
if gdat.typeverb > 1:
print('gdatmodi.this.indxproptype')
print(gdatmodi.this.indxproptype)
if gdatmodi.this.indxproptype == 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = gmodthis.indxelemfull
if gdatmodi.this.indxproptype > 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)
if gdatmodi.this.indxproptype == 0:
## proposal scale
if False:
# amplitude-dependent proposal scale
for l in gmod.indxpopl:
thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]
compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])
thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
if nameparagenrelem == gmod.nameparagenrelemampl[l]:
# temp -- this only works if compampl is powr distributed
gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.
gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.
gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. / gdatmodi.this.stdv**2))
else:
gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5
## propose a step
diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp
gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]
if indxsamplowr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]
if indxsampuppr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)
if gdat.booldiagmode:
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:
raise Exception('')
if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:
raise Exception('')
if not np.isfinite(gmodnext.paragenrscalfull).all():
raise Exception('')
if gdatmodi.this.indxproptype > 0:
gdatmodi.indxsamptran = []
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
elif gdatmodi.this.indxproptype != 2:
gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
# find an empty slot in the element list
for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:
break
gdatmodi.indxelemmodi = [u]
gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]
# sample indices to add the new element
gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)
gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])
if gdatmodi.this.indxproptype == 1:
# sample auxiliary variables
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara
# death
if gdatmodi.this.indxproptype == 2:
# occupied element index to be killed
if thisindxelem is None:
dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
else:
dethindxindxelem = thisindxelem
# element index to be killed
gdatmodi.indxelemmodi = []
gdatmodi.indxelemfullmodi = []
if gdat.typeverb > 1:
print('dethindxindxelem')
print(dethindxindxelem)
gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])
gdatmodi.indxelemfullmodi.append(dethindxindxelem)
# parameter indices to be killed
indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)
gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]
if gdatmodi.this.indxproptype > 2:
gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
# split
if gdatmodi.this.indxproptype == 3:
# find the probability of splitting elements
gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]
gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)
gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)
# sample indices for the first element
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)
# sample indices for the second element
gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd
# take the parent element parameters
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])
# draw the auxiliary parameters
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = np.random.rand()
else:
gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
# determine the new parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]
else:
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]
gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]
gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]
else:
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]
gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.compfrst[g] = gdatmodi.comppare[g]
gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]
# place the new parameters into the sample vector
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco
# check for prior boundaries
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:
gdatmodi.this.boolpropfilt = False
else:
if np.fabs(gdatmodi.compfrst[0]) > maxmlgal or np.fabs(gdatmodi.compseco[0]) > maxmlgal or \
np.fabs(gdatmodi.compfrst[1]) > maxmbgal or np.fabs(gdatmodi.compseco[1]) > maxmbgal:
gdatmodi.this.boolpropfilt = False
if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
if not gdatmodi.this.boolpropfilt:
print('Rejecting the proposal due to a split that falls out of the prior...')
if gdatmodi.this.indxproptype == 4:
# determine the index of the primary element to be merged (in the full element list)
gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))
## first element index to be merged
gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]
# find the probability of merging this element with the others
probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)
indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))
if gdat.booldiagmode:
if indxelemfulltemp.size < 2:
raise Exception('')
gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)
gdatmodi.indxelemfullmodi = np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))
# parameters of the first element to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## first
gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]
# determine indices of the modified elements in the sample vector
## first element
# temp -- this would not work for multiple populations !
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemfrst)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)
## second element index to be merged
gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]
## second element
gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemseco)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)
# parameters of the elements to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## second
gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]
# indices of the element to be merged
gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]
# auxiliary parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
else:
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]
gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \
(gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]
# merged element
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gdat, 'maxm' + gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
print('Proposal rejected due to falling outside the prior.')
return
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
else:
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
(gdatmodi.compseco[g] - gdatmodi.compfrst[g])
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]
else:
gdatmodi.comppare[g] = gdatmodi.compfrst[g]
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare
# calculate the proposed list of pairs
if gdat.typeverb > 1:
print('mergindxfrst: ', gdatmodi.mergindxelemfrst)
print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)
print('mergindxseco: ', gdatmodi.mergindxelemseco)
print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)
print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)
print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)
if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
print('elinfrst: ', gdatmodi.compfrst[0])
print('amplfrst: ', gdatmodi.compfrst[1])
print('elinseco: ', gdatmodi.compseco[0])
print('amplseco: ', gdatmodi.compseco[1])
print('elinpare: ', gdatmodi.comppare[0])
print('fluxpare: ', gdatmodi.comppare[1])
print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])
else:
print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])
print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])
print('amplfrst: ', gdatmodi.compfrst[2])
print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])
print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])
print('amplseco: ', gdatmodi.compseco[2])
print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])
print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])
print('fluxpare: ', gdatmodi.comppare[2])
print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])
print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])
if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:
# change the number of elements
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1
if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1
gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# remove the element from the occupied element list
if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):
for a, indxelem in enumerate(gdatmodi.indxelemmodi):
if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:
gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)
if gdatmodi.this.indxproptype == 0:
gdatmodi.indxsampmodi = thisindxsampfull
else:
if gdatmodi.this.indxproptype == 1:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gdatmodi.this.indxproptype == 2:
gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
if gdatmodi.this.indxproptype == 3:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \
gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))
if gdatmodi.this.indxproptype == 4:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gmod.numbparaelem > 0:
if gdatmodi.this.indxproptype == 0:
indxparagenrfullelem = thisindxparagenrfullelem
else:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)
if gdat.typeverb > 1:
print('gdatmodi.indxsampmodi')
print(gdatmodi.indxsampmodi)
if gmod.numbparaelem > 0:
print('gmodthis.indxelemfull')
print(gmodthis.indxelemfull)
print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')
print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))
if gdatmodi.this.indxproptype > 0:
print('gdatmodi.indxelemmodi')
print(gdatmodi.indxelemmodi)
print('gdatmodi.indxelemfullmodi')
print(gdatmodi.indxelemfullmodi)
print('gdatmodi.this.boolpropfilt')
print(gdatmodi.this.boolpropfilt)
print('indxparagenrfullelem')
print(indxparagenrfullelem)
if gdatmodi.this.indxproptype == 1:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \
gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
if gdat.booldiagmode:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):
print('l')
print(l)
print('gmod.indxpara.numbelem')
print(gmod.indxpara.numbelem)
print('gmodthis.paragenrunitfull')
print(gmodthis.paragenrunitfull)
raise Exception('')
if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if strgmodl == 'fitt':
diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)
#size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) & (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size
size = np.where(diffparagenrscalfull != 0.)[0].size
if gdatmodi.this.indxproptype == 1:
if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:
raise Exception('')
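# the quantities below multiply the Metropolis-Hastings acceptance probability of transdimensional proposals:
# lpau (log-prior of the auxiliary parameters), ltrp (log-ratio of the reverse and forward proposal
# probabilities) and ljcb (log-Jacobian of the split/merge parameter mapping)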
def calc_probprop(gdat, gdatmodi):
gmod = gdat.fitt
# calculate the factor to multiply the acceptance rate, i.e.,
## probability of the auxiliary parameters,
if gdatmodi.this.indxproptype == 0:
gdatmodi.this.lpau = 0.
elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl
lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.lpau += lpautemp
if gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau -= lpautemp
elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau = 0.
dictelemtemp = [dict()]
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2
elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]
gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gdatmodi.this.paragenrscalfull, dictelemtemp, [1])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau *= -1.
if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:
## the ratio of the probability of the reverse and forward proposals, and
if gdatmodi.this.indxproptype == 3:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)
else:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)
## Jacobian
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])
else:
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.ljcb *= -1.
else:
gdatmodi.this.ljcb = 0.
gdatmodi.this.ltrp = 0.
for l in gmod.indxpopl:
if gdatmodi.this.indxproptype > 0:
setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)
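# map the occupied element indices of each population to the indices of their generative
# parameters inside the full parameter vector, keyed by element parameter name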
def retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):
gmod = getattr(gdat, strgmodl)
## element parameters
if gmod.numbparaelem > 0:
indxparagenrfullelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]
cntr = tdpy.cntr()
indxparagenrfullelem[l] = dict()
for nameparagenrelem in gmod.namepara.genrelem[l]:
indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()
indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(indxparagenrfullelem[l]['full']) > 0:
if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:
print('strgmodl')
print(strgmodl)
print('strgstat')
print(strgstat)
print('gmod.numbparagenrbase')
print(gmod.numbparagenrbase)
print('gmod.numbparagenrelem[l]')
print(gmod.numbparagenrelem[l])
print('indxparagenrfullelem[l][full]')
summgene(indxparagenrfullelem[l]['full'])
print('gdat.fitt.minmpara.numbelempop0')
print(gdat.fitt.minmpara.numbelempop0)
print('gdat.fitt.maxmpara.numbelempop0')
print(gdat.fitt.maxmpara.numbelempop0)
raise Exception('Element parameter indices are bad.')
else:
indxparagenrfullelem = None
return indxparagenrfullelem
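# one-dimensional Gaussian merge kernel: the weight decays with the separation of the two line
# energies in units of the proposal scale gdat.radispmr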
def retr_weigmergodim(gdat, elin, elinothr):
weigmerg = np.exp(-0.5 * ((elin - elinothr) / gdat.radispmr)**2)
return weigmerg
def retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):
weigmerg = np.exp(-0.5 * (((lgal - lgalothr) / gdat.radispmr)**2 + ((bgal - bgalothr) / gdat.radispmr)**2))
return weigmerg
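# probability of selecting a merge partner ('seco') or of the merge pair ('pair'), based on the
# Gaussian kernel weights between the first element and the other elements of the population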
def retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):
# calculate the weights
if strgtype == 'seco':
numb = 1
if strgtype == 'pair':
numb = 2
listweigmerg = []
for a in range(numb):
if typeelem[indxpopltran].startswith('lghtline'):
elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]
elin = elintotl[gdatmodi.indxelemfullmodi[0]]
elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergodim(gdat, elin, elinothr)
else:
lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]
bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]
lgal = lgaltotl[gdatmodi.indxelemfullmodi[0]]
bgal = bgaltotl[gdatmodi.indxelemfullmodi[0]]
lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)
listweigmerg.append(weigmerg)
# determine the probability of merging the second element given the first element
if strgtype == 'seco':
probmerg = listweigmerg[0] / np.sum(listweigmerg[0])
# determine the probability of merging the pair
if strgtype == 'pair':
if typeelem[indxpopltran].startswith('lghtline'):
weigpair = retr_weigmergodim(gdat, elin, elintotl[gdatmodi.indxelemfullmodi[1]])
else:
weigpair = retr_weigmergtdim(gdat, lgal, lgaltotl[gdatmodi.indxelemfullmodi[1]], bgal, bgaltotl[gdatmodi.indxelemfullmodi[1]])
probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])
if gdat.booldiagmode:
if not np.isfinite(probmerg).all():
raise Exception('Merge probability is not finite.')
return probmerg
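# indices of the generative parameters of element u of population l in the full parameter vector;
# element parameters are laid out contiguously after the base parameters, in blocks of
# numbparagenrelemsing[l] per element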
def retr_indxparaelem(gmod, l, u):
indxsamppnts = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]
return indxsamppnts
def gang_detr():
gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')
AB = sympy.matrices.Matrix([[a1*b1,a1*b2,a1*b3],[a2*b1,a2*b2,a2*b3],[a3*b1,a3*b2,a3*b3]])
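# evaluate the PSF for each energy bin and event type at the given offset angles; for Fermi-LAT
# the angles are rescaled by the energy-dependent scale factor and the profile is renormalized
# to integrate to unity over solid angle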
def retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):
gmod = getattr(gdat, strgmodl)
indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])
if gdat.typeexpr == 'ferm':
scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
else:
scalangl = thisangl[None, :, None]
if typemodlpsfn == 'singgaus':
sigc = psfp[indxpsfpinit]
sigc = sigc[:, None, :]
psfn = retr_singgaus(scalangl, sigc)
elif typemodlpsfn == 'singking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
psfn = retr_singking(scalangl, sigc, gamc)
elif typemodlpsfn == 'doubking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigt = psfp[indxpsfpinit+2]
gamt = psfp[indxpsfpinit+3]
frac = psfp[indxpsfpinit+4]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
sigt = sigt[:, None, :]
gamt = gamt[:, None, :]
frac = frac[:, None, :]
psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)
if gdat.typeexpr == 'ferm':
psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)
# normalize the PSF
if gdat.typeexpr == 'ferm':
fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]
psfn /= fact
return psfn
def retr_unit(lgal, bgal):
xdat = np.cos(bgal) * np.cos(lgal)
ydat = -np.cos(bgal) * np.sin(lgal)
zaxi = np.sin(bgal)
return xdat, ydat, zaxi
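# two-dimensional power spectrum of the convergence map via an FFT; only the first quadrant
# (positive wavenumbers) is kept before flattening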
def retr_psec(gdat, conv):
# temp
conv = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))
psec = (abs(scipy.fftpack.fft2(conv))**2)[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf] * 1e-3
psec = psec.flatten()
return psec
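# azimuthally average the two-dimensional power spectrum into one-dimensional multipole bins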
def retr_psecodim(gdat, psec):
psec = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))
psecodim = np.zeros(gdat.numbsidecarthalf)
for k in gdat.indxmpolodim:
indxmpol = np.where((gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1]))
psecodim[k] = np.mean(psec[indxmpol])
psecodim *= gdat.meanpara.mpolodim**2
return psecodim
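# Gaussian CDF evaluated at the lower bound and the CDF mass between the two bounds, via the
# error function; used to normalize draws from a truncated Gaussian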
def retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):
cdfnminm = 0.5 * (sp.special.erf((minmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfnmaxm = 0.5 * (sp.special.erf((maxmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfndiff = cdfnmaxm - cdfnminm
return cdfnminm, cdfndiff
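# construct the condensed catalog: stack the elements of all posterior samples, build sparse,
# thresholded distance matrices in each element parameter, then repeatedly seed a condensed
# element at the stacked sample with the most neighbors and associate at most one element per
# posterior sample with it, until the estimated prevalence drops below the threshold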
def retr_condcatl(gdat):
# setup
## number of stacked samples
numbstks = 0
indxtupl = []
indxstks = []
indxstksparagenrscalfull = []
for n in gdat.indxsamptotl:
indxstks.append([])
indxstkssamptemp = []
for l in gmod.indxpopl:
indxstks[n].append([])
for k in range(len(gdat.listpostindxelemfull[n][l])):
indxstks[n][l].append(numbstks)
indxstkssamptemp.append(numbstks)
indxtupl.append([n, l, k])
numbstks += 1
indxstksparagenrscalfull.append(np.array(indxstkssamptemp))
if gdat.typeverb > 1:
print('indxstks')
print(indxstks)
print('indxtupl')
print(indxtupl)
print('indxstksparagenrscalfull')
print(indxstksparagenrscalfull)
print('numbstks')
print(numbstks)
cntr = 0
arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))
for n in gdat.indxsamptotl:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt')
for l in gmod.indxpopl:
for k in np.arange(len(gdat.listpostindxelemfull[n][l])):
for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, gmodstat.indxparagenrfullelem[l][nameparagenrelem][k]]
if gdat.typeverb > 0:
print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])
timeinit = gdat.functime()
gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
# temp
l = 0
gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]
# construct lists of samples for each proposal type
listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]
thisperc = 0
cntr = 0
for k in gmod.indxparagenrelemtotl:
for n in range(numbstks):
dist = np.fabs(arrystks[n, k] - arrystks[:, k])
indxstks = np.where(dist < gdat.distthrs[k])[0]
if indxstks.size > 0:
for j in indxstks:
cntr += 1
listdisttemp[k].append(dist[j])
indxstksrows[k].append(n)
indxstkscols[k].append(j)
nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)
if nextperc > thisperc:
thisperc = nextperc
if cntr > 1e6:
break
listdisttemp[k] = np.array(listdisttemp[k])
indxstksrows[k] = np.array(indxstksrows[k])
indxstkscols[k] = np.array(indxstkscols[k])
if cntr > 1e6:
break
listdist = [[] for k in range(gmod.numbparagenrelemtotl)]
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))
listindxstkspair = []
indxstksleft = []
if gdat.typeverb > 0:
timefinl = gdat.functime()
indxstksleft = range(numbstks)
# list of sample lists of the labeled element
indxstksassc = []
cntr = 0
gdat.prvlthrs = 0.05
while len(indxstksleft) > 0:
# count number of associations
numbdist = np.zeros(numbstks, dtype=int) - 1
for p in range(len(indxstksleft)):
indxindx = np.where((listdist[0][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \
(listdist[1][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmbgal < gdat.anglassc))[0]
numbdist[indxstksleft[p]] = indxindx.size
prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)
if prvlmaxmesti < gdat.prvlthrs:
break
# determine the element with the highest number of neighbors
indxstkscntr = np.argmax(numbdist)
indxsamptotlcntr = indxtupl[indxstkscntr][0]
indxpoplcntr = indxtupl[indxstkscntr][1]
indxelemcntr = indxtupl[indxstkscntr][2]
# add the central element sample
indxstksassc.append([])
indxstksassc[cntr].append(indxstkscntr)
indxstksleft.remove(indxstkscntr)
if gdat.typeverb > 1:
print('Match step %d' % cntr)
print('numbdist')
print(numbdist)
print('indxstkscntr')
print(indxstkscntr)
print('indxstksleft')
print(indxstksleft)
# add the associated element samples
if len(indxstksleft) > 0:
for n in gdat.indxsamptotl:
indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])
if n == indxsamptotlcntr:
continue
if indxstkstemp.size > 0:
totl = np.zeros_like(indxstkstemp)
for k in gmod.indxparagenrelemtotl:
temp = listdist[k][indxstkscntr, indxstkstemp].toarray()[0]
totl = totl + temp**2
indxleft = np.argsort(totl)[0]
indxstksthis = indxstkstemp[indxleft]
thisbool = True
for k in gmod.indxparagenrelemtotl:
if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:
thisbool = False
if thisbool:
indxstksassc[cntr].append(indxstksthis)
indxstksleft.remove(indxstksthis)
# temp
#if gdat.makeplot:
# gdatmodi = tdpy.gdatstrt()
# gdatmodi.this.indxelemfull = deepcopy(listindxelemfull[n])
# for r in range(len(indxstksassc)):
# calc_poststkscond(gdat, indxstksassc)
# gdatmodi.this.indxelemfull = [[] for l in gmod.indxpopl]
# for indxstkstemp in indxstksleft:
# indxsamptotlcntr = indxtupl[indxstkstemp][0]
# indxpoplcntr = indxtupl[indxstkstemp][1]
# indxelemcntr = indxtupl[indxstkstemp][2]
# gdatmodi.this.paragenrscalfull = gdat.listparagenrscalfull[indxsamptotlcntr, :]
# gdatmodi.this.indxelemfull[].append()
# plot_genemaps(gdat, gdatmodi, 'this', 'cntpdata', strgpdfn, indxenerplot=0, indxevttplot=0, cond=True)
cntr += 1
gdat.dictglob['poststkscond'] = []
gdat.dictglob['liststkscond'] = []
# for each condensed element
for r in range(len(indxstksassc)):
gdat.dictglob['liststkscond'].append([])
gdat.dictglob['liststkscond'][r] = {}
gdat.dictglob['poststkscond'].append([])
gdat.dictglob['poststkscond'][r] = {}
for strgfeat in gmod.namepara.genrelem:
gdat.dictglob['liststkscond'][r][strgfeat] = []
# for each associated sample associated with the central stacked sample
for k in range(len(indxstksassc[r])):
indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]
indxpoplcntr = indxtupl[indxstksassc[r][k]][1]
indxelemcntr = indxtupl[indxstksassc[r][k]][2]
for strgfeat in gmod.namepara.genrelem:
temp = getattr(gdat, 'list' + strgfeat)
if temp[indxsamptotlcntr][indxpoplcntr].size > 0:
temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]
gdat.dictglob['liststkscond'][r][strgfeat].append(temp)
for r in range(len(gdat.dictglob['liststkscond'])):
for strgfeat in gmod.namepara.genrelem:
arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)
gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))
gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = np.median(arry, axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][1, ...] = np.percentile(arry, 16., axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = np.percentile(arry, 84., axis=0)
gdat.numbstkscond = len(gdat.dictglob['liststkscond'])
gdat.indxstkscond = np.arange(gdat.numbstkscond)
gdat.prvl = np.empty(gdat.numbstkscond)
for r in gdat.indxstkscond:
gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])
gdat.prvl /= gdat.numbsamptotl
gdat.minmprvl = 0.
gdat.maxmprvl = 1.
retr_axis(gdat, 'prvl')
gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]
if gdat.makeplot:
pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
path = pathcond + 'histdist' + nameparagenrelem
listtemp = np.copy(listdist[k].toarray()).flatten()
listtemp = listtemp[np.where(listtemp != 1e20)[0]]
tdpy.mcmc.plot_hist(path, listtemp, r'$\Delta \tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')
path = pathcond + 'histprvl'
tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')
gdat.prvlthrs = 0.1
gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]
gdat.numbprvlhigh = gdat.indxprvlhigh.size
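# lensing convergence as half the (absolute) divergence of the deflection field, evaluated with
# finite differences on the Cartesian grid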
def retr_conv(gdat, defl):
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
# temp
conv = abs(np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0) + np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) / 2.
conv = conv.flatten()
return conv
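# inverse magnification, i.e., the determinant of the lensing Jacobian,
# (1 - d alpha_x/dx) * (1 - d alpha_y/dy) - (d alpha_x/dy) * (d alpha_y/dx)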
def retr_invm(gdat, defl):
# temp
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
invm = (1. - np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0)) * (1. - np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) - \
np.gradient(defl[:, :, 0], gdat.sizepixl, axis=1) * np.gradient(defl[:, :, 1], gdat.sizepixl, axis=0)
invm = invm.flatten()
return invm
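# mark which sweeps are saved as samples: discard the first numbburn sweeps and keep every
# factthin-th sweep afterwards, recording the sweep-to-sample index map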
def setp_indxswepsave(gdat):
gdat.indxswep = np.arange(gdat.numbswep)
gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)
gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)
gdat.boolsave[gdat.indxswepsave] = True
gdat.indxsampsave = np.zeros(gdat.numbswep, dtype=int) - 1
gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)
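# expected counts of point elements: spectrum times the total exposure at the element pixel,
# multiplied by the energy bin width for differential energy bins and summed over energy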
def retr_cntspnts(gdat, listposi, spec):
cnts = np.zeros((gdat.numbener, spec.shape[1]))
if gdat.boolbinsspat:
lgal = listposi[0]
bgal = listposi[1]
indxpixlpnts = retr_indxpixl(gdat, bgal, lgal)
else:
elin = listposi[0]
indxpixlpnts = np.zeros_like(elin, dtype=int)
for k in range(spec.shape[1]):
cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]
if gdat.enerdiff:
cnts *= gdat.deltener[:, None]
cnts = np.sum(cnts, axis=0)
return cnts
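# critical surface mass density for lensing; assuming gdat.factnewtlght is c^2 / G, this is
# Sigma_crit = c^2 D_S / (4 pi G D_L D_LS) in terms of the angular diameter distances provided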
def retr_mdencrit(gdat, adissour, adishost, adishostsour):
mdencrit = gdat.factnewtlght / 4. / np.pi * adissour / adishostsour / adishost
return mdencrit
def retr_massfrombein(gdat, adissour, adishost, adishostsour):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
massfrombein = np.pi * adishost**2 * mdencrit
return massfrombein
def retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
fracacutasca = acut / asca
factmcutfromdefs = np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(fracacutasca)
return factmcutfromdefs
def retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):
mscl = defs * np.pi * adishost**2 * mdencrit * asca
fracacutasca = acut / asca
mcut = mscl * retr_mcutfrommscl(fracacutasca)
return mcut
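# ratio of the cutoff mass to the scale mass as a function of acut / asca; this appears to be
# the total-mass factor of a truncated NFW profile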
def retr_mcutfrommscl(fracacutasca):
mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))
return mcut
def retr_negalogt(varb):
negalogt = np.sign(varb) * np.log10(np.fabs(varb))
return negalogt
def retr_gradmaps(gdat, maps):
# temp -- this does not work with vanishing exposure
maps = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))
grad = np.dstack((np.gradient(maps, gdat.sizepixl, axis=0), np.gradient(maps, gdat.sizepixl, axis=1))).reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
grad = grad.reshape((gdat.numbpixlcart, 2))
return grad
def retr_spatmean(gdat, inpt, boolcntp=False):
listspatmean = [[] for b in gdat.indxspatmean]
listspatstdv = [[] for b in gdat.indxspatmean]
for b, namespatmean in enumerate(gdat.listnamespatmean):
if boolcntp:
cntp = inpt[gdat.listindxcubespatmean[b]]
else:
cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix
spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix
if gdat.boolcorrexpo:
spatmean /= gdat.expototlmean
spatstdv /= gdat.expototlmean
if gdat.enerdiff:
spatmean /= gdat.deltener
spatstdv /= gdat.deltener
listspatmean[b] = spatmean
listspatstdv[b] = spatstdv
return listspatmean, listspatstdv
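# relevance of a subhalo: mean (absolute) dot product between the gradient of the map and the
# deflection field of the element, optionally weighted by the inverse of the model counts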
def retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):
grad = retr_gradmaps(gdat, maps)
defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)
prod = grad * defl
if cntpmodl is not None:
prod /= cntpmodl[:, None]
dotstemp = np.sum(prod, 1)
if absv:
dotstemp = np.fabs(dotstemp)
else:
dotstemp = dotstemp
dots = np.mean(dotstemp)
return dots
def retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):
if strgvarb.startswith('cntpdata'):
varb = getattr(gdat, strgvarb)
elif strgvarb.startswith('histcntpdata'):
varb = getattr(gdat, strgvarb)
else:
if strgmodl == 'true':
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
varb = getattr(gmodstat, strgvarb)
if strgmodl == 'fitt':
if strgstat == 'this':
if strgmome == 'errr':
varb = getattr(gdatmodi, strgstat + 'errr' + strgvarb)
else:
varb = getattr(gdatmodi, strgstat + strgvarb)
if strgstat == 'pdfn':
varb = getattr(gdat, strgmome + strgpdfn + strgvarb)
if indxlist is not None:
varb = varb[indxlist]
if indxvarb is not None:
if strgmome == 'errr':
varb = varb[[slice(None)] + indxvarb]
else:
varb = varb[indxvarb]
return np.copy(varb)
def setp_indxpara(gdat, typesetp, strgmodl='fitt'):
print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))
gmod = getattr(gdat, strgmodl)
if typesetp == 'init':
if strgmodl == 'fitt':
gmod.lablmodl = 'Model'
if strgmodl == 'true':
gmod.lablmodl = 'True'
# transdimensional element populations
gmod.numbpopl = len(gmod.typeelem)
gmod.indxpopl = np.arange(gmod.numbpopl)
if gdat.typeexpr != 'user':
# background component
gmod.numbback = 0
gmod.indxback = []
for c in range(len(gmod.typeback)):
if isinstance(gmod.typeback[c], str):
if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
ordrexpa = int(gmod.typeback[c][8:])
numbexpa = 4 * ordrexpa**2
indxexpa = np.arange(numbexpa)
del gmod.typeback[c]
for k in indxexpa:
gmod.typeback.insert(c+k, namebfun + '%04d' % k)
gmod.numbback = len(gmod.typeback)
gmod.indxback = np.arange(gmod.numbback)
gmod.numbbacktotl = np.sum(gmod.numbback)
gmod.indxbacktotl = np.arange(gmod.numbbacktotl)
# galaxy components
gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)
# name of the generative element parameter used for the amplitude
gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]
gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.nameparagenrelemampl[l] = 'per0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.nameparagenrelemampl[l] = 'lum0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtline'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 1
elif gmod.typeelem[l].startswith('lghtpnts'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtgausbgrd'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l].startswith('clus'):
gmod.nameparagenrelemampl[l] = 'nobj'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
if gmod.typeelem[l] == 'clus':
gmod.nameparagenrelemampl[l] = 'nobj'
if len(gmod.nameparagenrelemampl[l]) == 0:
raise Exception('Amplitude feature undefined.')
for featpara in gdat.listfeatpara:
for strggrop in gdat.liststrggroppara:
setattr(gmod, 'list' + featpara + 'para' + strggrop, [])
if typesetp == 'finl':
# number of elements in the current state of the true model
if strgmodl == 'true':
gmod.numbelem = np.zeros(gmod.numbpopl)
for l in gmod.indxpopl:
gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)
gmod.numbelemtotl = np.sum(gmod.numbelem)
# element setup
## flag to calculate the kernel approximation errors
boolcalcerrr = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:
# temp
boolcalcerrr[l] = False
else:
boolcalcerrr[l] = False
setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)
# maximum number of elements for each population
gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)
# maximum number of elements summed over all populations
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
## sorting feature
nameparaelemsort = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# feature to be used to sort elements
if gmod.typeelem[l].startswith('lght'):
nameparaelemsort[l] = 'flux'
if gmod.typeelem[l] == 'lens':
nameparaelemsort[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
nameparaelemsort[l] = 'nobj'
## label extensions
gmod.lablelemextn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{fps}'
if gmod.typeelem[l] == 'lghtgausbgrd':
gmod.lablelemextn[l] = r'\rm{bgs}'
else:
if gmod.typeelem[l].startswith('lghtpntspuls'):
gmod.lablelemextn[l] = r'\rm{pul}'
if gmod.typeelem[l].startswith('lghtpntsagnn'):
gmod.lablelemextn[l] = r'\rm{agn}'
elif gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{pts}'
if gmod.typeelem[l] == 'lens':
gmod.lablelemextn[l] = r'\rm{sub}'
if gmod.typeelem[l].startswith('clus'):
gmod.lablelemextn[l] = r'\rm{cls}'
if gmod.typeelem[l].startswith('lghtline'):
gmod.lablelemextn[l] = r'\rm{lin}'
gmod.indxpoplgrid = [[] for y in gdat.indxgrid]
for y in gdat.indxgrid:
for indx, typeelemtemp in enumerate(gmod.typeelem):
# foreground grid (image plane) -- the one where the data is measured
if y == 0:
if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):
gmod.indxpoplgrid[y].append(indx)
# foreground mass grid
if y == 1:
if typeelemtemp.startswith('lens'):
gmod.indxpoplgrid[y].append(indx)
# background grid (source plane)
if y == 2:
if typeelemtemp.endswith('bgrd'):
gmod.indxpoplgrid[y].append(indx)
indxgridpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for y in gdat.indxgrid:
if l in gmod.indxpoplgrid[y]:
indxgridpopl[l] = y
calcelemsbrt = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts'):
calcelemsbrt = True
if 'lghtgausbgrd' in gmod.typeelem:
calcelemsbrtbgrd = True
else:
calcelemsbrtbgrd = False
if gmod.boollenssubh:
calcelemdefl = True
else:
calcelemdefl = False
## element Boolean flags
gmod.boolelemlght = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.boolelemlght[l] = True
else:
gmod.boolelemlght[l] = False
gmod.boolelemlghtanyy = True in gmod.boolelemlght
gmod.boolelemlens = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lens'):
gmod.boolelemlens = True
gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):
gmod.boolelemsbrtdfnc[l] = True
else:
gmod.boolelemsbrtdfnc[l] = False
gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc
gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
gmod.boolelemdeflsubh[l] = True
else:
gmod.boolelemdeflsubh[l] = False
gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh
gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l].endswith('bgrd'):
gmod.boolelemsbrtextsbgrd[l] = True
else:
gmod.boolelemsbrtextsbgrd[l] = False
gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd
if gmod.boolelemsbrtextsbgrdanyy:
gmod.indxpopllens = 1
else:
gmod.indxpopllens = 0
gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':
gmod.boolelemsbrtpnts[l] = True
else:
gmod.boolelemsbrtpnts[l] = False
gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts
# temp -- because there is currently no extended source
gmod.boolelemsbrt = gmod.boolelemsbrtdfnc
gmod.boolelempsfn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':
gmod.boolelempsfn[l] = True
else:
gmod.boolelempsfn[l] = False
gmod.boolelempsfnanyy = True in gmod.boolelempsfn
spectype = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
spectype[l] = 'powr'
else:
spectype[l] = 'none'
setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)
minmgwdt = 2. * gdat.sizepixl
maxmgwdt = gdat.maxmgangdata / 4.
setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)
setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')
if gmod.boolelemlghtanyy:
# flux
if gdat.typeexpr == 'ferm':
minmflux = 1e-9
maxmflux = 1e-6
if gdat.typeexpr == 'tess':
minmflux = 1.
maxmflux = 1e3
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
minmflux = 1e4
maxmflux = 1e7
else:
minmflux = 3e-9
maxmflux = 1e-6
if gdat.typeexpr == 'gene':
minmflux = 0.1
maxmflux = 100.
if gdat.typeexpr == 'hubb':
minmflux = 1e-20
maxmflux = 1e-17
if gdat.typeexpr == 'fire':
minmflux = 1e-20
maxmflux = 1e-17
setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
if gdat.boolbinsener:
### spectral parameters
if gdat.typeexpr == 'ferm':
sind = [1., 3.]
minmsind = 1.
maxmsind = 3.
if gdat.typeexpr == 'chan':
minmsind = 0.4
maxmsind = 2.4
sind = [0.4, 2.4]
if gdat.typeexpr == 'hubb':
minmsind = 0.5
maxmsind = 2.5
sind = [0.4, 2.4]
if gdat.typeexpr != 'fire':
setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)
setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)
#### standard deviations should not be too small
setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)
for i in gdat.indxenerinde:
setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)
setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)
setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full')
setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)
setp_varb(gdat, 'dlosslop', limt=[-0.5, -3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)
setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
# construct background surface brightness templates from the user input
gmod.sbrtbacknorm = [[] for c in gmod.indxback]
gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)
for c in gmod.indxback:
gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gmod.typeback[c] == 'data':
gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)
gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100
elif isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]
elif isinstance(gmod.typeback[c], list) and isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c]]), sind=np.array([gmod.typeback[c]]))[:, 0, None, None]
elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]
elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
indxexpatemp = int(gmod.typeback[c][8:])
indxterm = indxexpatemp // ordrexpa**2
indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1
indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1
if namebfun == 'bfunfour':
ampl = 1.
func = gdat.meanpara.bgalcart
if namebfun == 'bfunwfou':
functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)
ampl = np.sqrt(functemp)
func = functemp
argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata
argsbgal = 2. * np.pi * indxexpaydat * func / gdat.maxmgangdata
if indxterm == 0:
termfrst = np.sin(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 1:
termfrst = np.sin(argslgal)
termseco = ampl * np.cos(argsbgal)
if indxterm == 2:
termfrst = np.cos(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 3:
termfrst = np.cos(argslgal)
termseco = ampl * np.cos(argsbgal)
gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \
np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
else:
path = gdat.pathinpt + gmod.typeback[c]
gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
if not gdat.boolforccart:
if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:
raise Exception('Provided background template must have the chosen image dimensions.')
gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))
if gdat.typepixl == 'cart' and gdat.boolforccart:
sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gmod.sbrtbacknorm[c] = sbrtbacknormtemp
# determine spatially uniform background templates
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:
gmod.boolunifback[c] = False
boolzero = True
gmod.boolbfun = False
for c in gmod.indxback:
if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):
booltemp = False
raise Exception('Background templates must be positive-definite everywhere.')
if not np.isfinite(gmod.sbrtbacknorm[c]).all():
raise Exception('Background template is not finite.')
if np.amin(gmod.sbrtbacknorm[c]) > 0. or gmod.typeback[c] == 'data':
boolzero = False
if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):
gmod.boolbfun = True
if boolzero and not gmod.boolbfun:
raise Exception('At least one background template must be positive everywhere.')
# temp -- does not take into account dark hosts
gmod.boolhost = gmod.typeemishost != 'none'
# type of PSF evaluation
if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel and point sources exist
typeevalpsfn = 'full'
else:
# the background is not convolved by a kernel and point sources exist
typeevalpsfn = 'kern'
else:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel, no point source exists
typeevalpsfn = 'conv'
else:
# the background is not convolved by a kernel, no point source exists
typeevalpsfn = 'none'
setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)
if gdat.typeverb > 1:
print('gmod.typeevalpsfn')
print(gmod.typeevalpsfn)
gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'
### PSF model
if gmod.typeevalpsfn != 'none':
if gmod.typemodlpsfn == 'singgaus':
numbpsfpform = 1
elif gmod.typemodlpsfn == 'singking':
numbpsfpform = 2
elif gmod.typemodlpsfn == 'doubgaus':
numbpsfpform = 3
elif gmod.typemodlpsfn == 'gausking':
numbpsfpform = 4
elif gmod.typemodlpsfn == 'doubking':
numbpsfpform = 5
gmod.numbpsfptotl = numbpsfpform
if gdat.boolpriopsfninfo:
for i in gdat.indxener:
for m in gdat.indxevtt:
meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]
stdvsigc = meansigc * 0.1
setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot='$\sigma$', scal='gaus', \
strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]
stdvgamc = meangamc * 0.1
setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking':
meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]
stdvsigt = meansigt * 0.1
setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)
meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]
stdvgamt = meangamt * 0.1
setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)
meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]
stdvpsff = meanpsff * 0.1
setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)
else:
if gdat.typeexpr == 'gene':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'ferm':
minmsigm = 0.1
maxmsigm = 10.
if gdat.typeexpr == 'hubb':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'chan':
minmsigm = 0.1 / gdat.anglfact
maxmsigm = 2. / gdat.anglfact
minmgamm = 1.5
maxmgamm = 20.
setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot='$\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)
# background
## number of background parameters
numbbacp = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
numbbacp += 1
else:
numbbacp += gdat.numbener
## background parameter indices
gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)
indxenerbacp = np.zeros(numbbacp, dtype=int)
cntr = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
gmod.indxbackbacp[cntr] = c
cntr += 1
else:
for i in gdat.indxener:
indxenerbacp[cntr] = i
gmod.indxbackbacp[cntr] = c
cntr += 1
# indices of background parameters for each background component
gmod.indxbacpback = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]
# list of names of diffuse components
gmod.listnamediff = []
for c in gmod.indxback:
gmod.listnamediff += ['back%04d' % c]
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
gmod.listnamediff += ['hostisf%d' % e]
if gmod.boollens:
gmod.listnamediff += ['lens']
# list of names of emission components
listnameecom = deepcopy(gmod.listnamediff)
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:
if not 'dfnc' in listnameecom:
listnameecom += ['dfnc']
if not 'dfncsubt' in listnameecom:
listnameecom += ['dfncsubt']
gmod.listnameecomtotl = listnameecom + ['modl']
for c in gmod.indxback:
setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)
gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)
if gmod.boollens:
gmod.listnamegcom += ['bgrd']
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']
numbdiff = len(gmod.listnamediff)
convdiff = np.zeros(numbdiff, dtype=bool)
for k, namediff in enumerate(gmod.listnamediff):
if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):
if namediff.startswith('back'):
indx = int(namediff[-4:])
convdiff[k] = not gmod.boolunifback[indx]
else:
convdiff[k] = True
# element parameters that correlate with the statistical significance of the element
gmod.namepara.elemsign = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.elemsign[l] = 'flux'
if gmod.typeelem[l] == 'lens':
gmod.namepara.elemsign[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
gmod.namepara.elemsign[l] = 'nobj'
if gdat.typeverb > 0:
if strgmodl == 'true':
strgtemp = 'true'
if strgmodl == 'fitt':
strgtemp = 'fitting'
print('Building elements for the %s model...' % strgtemp)
# define the names and scalings of element parameters
gmod.namepara.genrelem = [[] for l in gmod.indxpopl]
gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] = ['elin']
gmod.listscalparagenrelem[l] = ['logt']
elif gmod.typespatdist[l] == 'diskscal':
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'dexp']
elif gmod.typespatdist[l] == 'gangexpo':
gmod.namepara.genrelem[l] = ['gang', 'aang']
gmod.listscalparagenrelem[l] = ['expo', 'self']
elif gmod.typespatdist[l] == 'glc3':
gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']
gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']
else:
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'self']
# amplitude
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['lum0']
gmod.listscalparagenrelem[l] += ['dpowslopbrek']
elif gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['per0']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
elif gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['flux']
gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]
elif gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['defs']
gmod.listscalparagenrelem[l] += ['powr']
elif gmod.typeelem[l].startswith('clus'):
gmod.namepara.genrelem[l] += ['nobj']
gmod.listscalparagenrelem[l] += ['powr']
# shape
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
gmod.namepara.genrelem[l] += ['gwdt']
gmod.listscalparagenrelem[l] += ['powr']
if gmod.typeelem[l] == 'lghtlinevoig':
gmod.namepara.genrelem[l] += ['sigm']
gmod.listscalparagenrelem[l] += ['logt']
gmod.namepara.genrelem[l] += ['gamm']
gmod.listscalparagenrelem[l] += ['logt']
# others
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['magf']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
gmod.namepara.genrelem[l] += ['geff']
gmod.listscalparagenrelem[l] += ['self']
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['dlos']
gmod.listscalparagenrelem[l] += ['powr']
if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):
if gmod.spectype[l] == 'colr':
for i in gdat.indxener:
if i == 0:
continue
gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]
gmod.listscalparagenrelem[l] += ['self']
else:
gmod.namepara.genrelem[l] += ['sind']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'curv':
gmod.namepara.genrelem[l] += ['curv']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'expc':
gmod.namepara.genrelem[l] += ['expc']
gmod.listscalparagenrelem[l] += ['self']
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
gmod.namepara.genrelem[l] += ['asca']
gmod.listscalparagenrelem[l] += ['self']
if gdat.variacut:
gmod.namepara.genrelem[l] += ['acut']
gmod.listscalparagenrelem[l] += ['self']
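# Illustration: gmod.namepara.genrelem[l] and gmod.listscalparagenrelem[l] are kept as
# parallel lists (parameter name and prior scaling at the same index). For a typical
# 'lghtpnts' population with the default spatial model and a power-law flux prior, they
# would read something like ['lgal', 'bgal', 'flux'] and ['self', 'self', 'powr'].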
# names of element parameters for each scaling
gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]
for l in gmod.indxpopl:
for scaltype in gdat.listscaltype:
gmod.namepara.genrelemscal[l][scaltype] = []
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if scaltype == gmod.listscalparagenrelem[l][k]:
gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)
# variables whose marginal distributions and pair correlations will be plotted
gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])
gmod.namepara.derielemodim[l] += ['deltllik']
if gdat.boolbinsspat:
if not 'lgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['lgal']
if not 'bgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['bgal']
if not 'gang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['gang']
if not 'aang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['aang']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.derielemodim[l] += ['cnts']
if gdat.typeexpr == 'ferm':
gmod.namepara.derielemodim[l] += ['sbrt0018']
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.derielemodim[l] += ['reds']
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
gmod.namepara.derielemodim[l] += ['mass']
gmod.namepara.derielemodim[l] += ['dlos']
if gmod.typeelem[l] == 'lens':
gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']
#for k in range(len(gmod.namepara.derielemodim[l])):
# gmod.namepara.derielemodim[l][k] += 'pop%d' % l
# check later
# temp
#if strgmodl == 'fitt':
# for q in gdat.indxrefr:
# if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:
# gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])
if gdat.typeverb > 1:
print('gmod.namepara.derielemodim')
print(gmod.namepara.derielemodim)
# derived element parameters
gmod.namepara.derielem = gmod.namepara.derielemodim[:]
if gdat.typeverb > 1:
print('gmod.namepara.derielem')
print(gmod.namepara.derielem)
# derived parameters
gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]
#gmod.listnameparaderitotl += gmod.namepara.scal
for namediff in gmod.listnamediff:
gmod.listnameparaderitotl += ['cntp' + namediff]
if gdat.typeverb > 1:
print('gmod.listnameparaderitotl')
print(gmod.listnameparaderitotl)
if strgmodl == 'fitt':
# add reference element parameters that are not available in the fitting model
gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
gmod.namepara.extrelem = [[] for l in gmod.indxpopl]
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for name in gdat.refr.namepara.elem[q]:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):
continue
if gmod.typeelem[l] == ('lens') and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):
continue
if not name in gmod.namepara.derielemodim[l]:
nametotl = name + gdat.listnamerefr[q]
if name == 'etag':
continue
gmod.namepara.derielemodim[l].append(nametotl)
if gdat.refr.numbelem[q] == 0:
continue
gdat.refr.namepara.elemonly[q][l].append(name)
if not nametotl in gmod.namepara.extrelem[l]:
gmod.namepara.extrelem[l].append(nametotl)
#if name == 'reds':
# for nametemp in ['lumi', 'dlos']:
# nametemptemp = nametemp + gdat.listnamerefr[q]
# if not nametemptemp in gmod.namepara.extrelem[l]:
# gmod.namepara.derielemodim[l].append(nametemp + gdat.listnamerefr[q])
# gmod.namepara.extrelem[l].append(nametemptemp)
if gdat.typeverb > 1:
print('gdat.refr.namepara.elemonly')
print(gdat.refr.namepara.elemonly)
if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpnts':
gmod.namepara.extrelem[l].append('lumiwo08')
gmod.namepara.derielemodim[l].append('lumiwo08')
if gdat.typeverb > 1:
print('gmod.namepara.extrelem')
print(gmod.namepara.extrelem)
# defaults
gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]
gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:
if l == 1:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
if l == 2:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:
for strgthis in liststrg:
if not strgthis in gmod.namepara.elem[l]:
gmod.namepara.elem[l].append(strgthis)
# temp
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] += ['spec']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['spec', 'specplot']
if gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['deflprof']
#gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]
#for l in gmod.indxpopl:
# if gmod.typeelem[l].startswith('clus'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']
# if gmod.typeelem[l] == 'clusvari':
# gmod.namepara.genrelemeval[l] += ['gwdt']
# if gmod.typeelem[l] == 'lens':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']
# if gmod.typeelem[l].startswith('lghtline'):
# gmod.namepara.genrelemeval[l] = ['elin', 'spec']
# elif gmod.typeelem[l] == 'lghtgausbgrd':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']
# elif gmod.typeelem[l].startswith('lght'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']
## element legends
lablpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
lablpopl[l] = 'FPS'
if gmod.typeelem[l] == 'lghtgausbgrd':
lablpopl[l] = 'BGS'
else:
if gmod.typeelem[l] == 'lghtpntspuls':
lablpopl[l] = 'Pulsar'
elif gmod.typeelem[l].startswith('lghtpntsagnn'):
lablpopl[l] = 'AGN'
elif gmod.typeelem[l].startswith('lghtpnts'):
lablpopl[l] = 'PS'
if gmod.typeelem[l] == 'lens':
lablpopl[l] = 'Subhalo'
if gmod.typeelem[l].startswith('clus'):
lablpopl[l] = 'Cluster'
if gmod.typeelem[l].startswith('lghtline'):
lablpopl[l]= 'Line'
setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)
if strgmodl == 'true':
gmod.indxpoplassc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':
gmod.indxpoplassc[l] = [l]
else:
gmod.indxpoplassc[l] = gmod.indxpopl
# variables for which two dimensional histograms will be calculated
gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]
if gdat.boolplotelemcorr:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.derielemodim[l]:
gmod.namepara.genrelemcorr[l].append(strgfeat)
# number of element parameters
if gmod.numbpopl > 0:
gmod.numbparagenrelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
# number of generative element parameters for a single element of a specific population
gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])
# number of derived element parameters for a single element of a specific population
gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])
# number of element parameters for a single element of a specific population
gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])
# number of generative element parameters for all elements of a specific population
gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters up to the beginning of a population
gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])
# number of generative element parameters up to the end of a population
gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])
# number of derived element parameters for all elements of a specific population
gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.numbelem[l]
# number of element parameters for all elements of a specific population
gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.numbelem[l]
# number of generative element parameters summed over all populations
gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)
# number of derived element parameters summed over all populations
gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)
# number of element parameters summed over all populations
gmod.numbparaelemtotl = np.sum(gmod.numbparaelem)
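# Worked example: for a population 0 with 4 generative parameters per element and at
# most 100 elements, numbparagenrelemsing[0] = 4 and numbparagenrelem[0] = 400, while
# numbparagenrelemcuml[0] = 0 and numbparagenrelemcumr[0] = 400 delimit that
# population's slice of the flattened element-parameter block.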
gmod.indxparagenrelemsing = []
for l in gmod.indxpopl:
gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))
gmod.indxparaderielemsing = []
for l in gmod.indxpopl:
gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))
gmod.indxparaelemsing = []
for l in gmod.indxpopl:
gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))
# size of the auxiliary variable probability density vector
if gmod.maxmpara.numbelemtotl > 0:
gmod.numblpri = 3 + gmod.numbparagenrelem * gmod.numbpopl
else:
gmod.numblpri = 0
if gdat.penalpridiff:
gmod.numblpri += 1
indxlpri = np.arange(gmod.numblpri)
# append the population tags to element parameter names
#for l in gmod.indxpopl:
# gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]
gmod.boolcompposi = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)
if gmod.typeelem[l].startswith('lghtline'):
gmod.boolcompposi[l][0] = True
else:
gmod.boolcompposi[l][0] = True
gmod.boolcompposi[l][1] = True
# list of strings across all populations
## all (generative and derived) element parameters
gmod.numbparaelem = len(gmod.namepara.elem)
gmod.indxparaelem = np.arange(gmod.numbparaelem)
# flattened list of generative element parameters
gmod.listnameparagenfelem = []
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)
# concatenated list of flattened generative and derived element parameters
gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem
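# Naming convention: the flattened generative names carry a population tag, e.g.
# 'fluxpop0' for the flux of elements in population 0; gmod.namepara.derielem, by
# contrast, is still a per-population list of (untagged) derived parameter names here.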
gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.numbparaelem[l] = len(gmod.namepara.elem[l])
numbdeflsubhplot = 2
numbdeflsingplot = numbdeflsubhplot
if gmod.numbparaelem > 0:
numbdeflsingplot += 3
gmod.convdiffanyy = True in convdiff
cntr = tdpy.cntr()
if gmod.boollens:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
# object of parameter indices
gmod.indxpara = tdpy.gdatstrt()
# define parameter indices
if gmod.numbparaelem > 0:
# number of elements
#gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
indx = cntr.incr()
setattr(gmod.indxpara, 'numbelempop%d' % l, indx)
#gmod.indxpara.numbelem[l] = indx
# hyperparameters
## mean number of elements
if gmod.typemodltran == 'pois':
#gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
indx = cntr.incr()
setattr(gmod.indxpara, 'meanelempop%d' % l, indx)
#gmod.indxpara.meanelem[l] = indx
## parameters parametrizing priors on element parameters
liststrgvarb = []
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):
if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':
liststrgvarb += [strgfeat + 'distscal']
if strgpdfnelemgenr == 'powr':
liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]
if strgpdfnelemgenr == 'dpow':
liststrgvarb += [strgfeat + 'distbrek']
liststrgvarb += [strgfeat + 'sloplowr']
liststrgvarb += [strgfeat + 'slopuppr']
if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':
liststrgvarb += [strgfeat + 'distmean']
if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':
liststrgvarb += [strgfeat + 'diststdv']
if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':
liststrgvarb += [strgfeat + 'distmean', strgfeat + 'diststdv']
for strgvarb in liststrgvarb:
setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)
for l in gmod.indxpopl:
strgpopl = 'pop%d' % l
if gmod.maxmpara.numbelem[l] > 0:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if gmod.listscalparagenrelem[l][k] == 'self':
continue
indx = cntr.incr()
if gmod.listscalparagenrelem[l][k] == 'dpow':
for nametemp in ['brek', 'sloplowr', 'slopuppr']:
strg = '%s' % nametemp + nameparagenrelem
setattr(gmod.indxpara, strg, indx)
else:
if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':
strghypr = 'scal'
if gmod.listscalparagenrelem[l][k] == 'powr':
strghypr = 'slop'
if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'mean'
if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'stdv'
strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l
setattr(gmod.indxpara, strg, indx)
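# Naming convention: the hyperprior index attribute is composed as the scaling-specific
# prefix + 'prio' + the element parameter name + the population tag; e.g., the slope of
# a power-law flux prior in population 0 ends up as gmod.indxpara.sloppriofluxpop0.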
# group PSF parameters
if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':
for m in gdat.indxevtt:
for i in gdat.indxener:
setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking':
setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'psffen%02devt%d' % (i, m), cntr.incr())
gmod.indxpara.psfp = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):
gmod.indxpara.psfp.append(valu)
gmod.indxpara.psfp = np.array(gmod.indxpara.psfp)
gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl
gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl
numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt
indxpsfpform = np.arange(numbpsfpform)
indxpsfptotl = np.arange(gmod.numbpsfptotl)
indxpsfp = np.arange(numbpsfp)
gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)
gmod.indxparapsfpinit = gmod.indxpara.psfp[0]
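# Summary: the PSF parameters registered above (per energy bin and event type) are
# gathered into gmod.indxpara.psfp by matching their name prefixes ('sigce', 'sigte',
# 'gamce', 'gamte', 'psffe'); sorting them lets the PSF block be sliced out of the
# parameter vector starting at indxparapsfpinit.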
# group background parameters
gmod.indxpara.bacp = []
for c in gmod.indxback:
if gmod.boolspecback[c]:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04d' % c, indx)
gmod.indxpara.bacp.append(indx)
else:
for i in gdat.indxener:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)
gmod.indxpara.bacp.append(indx)
gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)
# temp
#gmod.indxpara.anglsour = []
#gmod.indxpara.anglhost = []
#gmod.indxpara.angllens = []
if gmod.typeemishost != 'none':
gmod.indxpara.specsour = []
gmod.indxpara.spechost = []
if gmod.boollens:
gmod.indxpara.lgalsour = cntr.incr()
gmod.indxpara.bgalsour = cntr.incr()
gmod.indxpara.fluxsour = cntr.incr()
if gdat.numbener > 1:
gmod.indxpara.sindsour = cntr.incr()
gmod.indxpara.sizesour = cntr.incr()
gmod.indxpara.ellpsour = cntr.incr()
gmod.indxpara.anglsour = cntr.incr()
if gmod.typeemishost != 'none' or gmod.boollens:
for e in gmod.indxsersfgrd:
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())
if gdat.numbener > 1:
setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())
if gmod.boollens:
setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())
if gmod.boollens:
gmod.indxpara.sherextr = cntr.incr()
gmod.indxpara.sangextr = cntr.incr()
gmod.indxpara.sour = []
if gmod.boollens and gmod.typeemishost == 'none':
raise Exception('Lensing cannot be modeled without host galaxy emission.')
# collect groups of parameters
if gdat.typeexpr == 'hubb':
gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']
for namecomplens in gmod.listnamecomplens:
setattr(gmod, 'liststrg' + namecomplens, [])
setattr(gmod.indxpara, namecomplens, [])
if gmod.boollens or gmod.typeemishost != 'none':
gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
if gmod.typeemishost != 'none':
gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']
if gdat.numbener > 1:
gmod.liststrghostlght += ['sindhost']
if gmod.boollens:
gmod.liststrghostlens += ['beinhost']
gmod.liststrgextr += ['sherextr', 'sangextr']
gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']
if gdat.numbener > 1:
gmod.liststrgsour += ['sindsour']
for strg, valu in gmod.__dict__.items():
if isinstance(valu, list) or isinstance(valu, np.ndarray):
continue
if gdat.typeexpr == 'hubb':
for namecomplens in gmod.listnamecomplens:
for strgtemp in getattr(gmod, 'liststrg' + namecomplens):
if strg[12:].startswith(strgtemp):
if isinstance(valu, list):
for valutemp in valu:
getattr(gmod.indxpara, namecomplens).append(valutemp)
else:
getattr(gmod.indxpara, namecomplens).append(valu)
# remove indxpara. from strg
strg = strg[12:]
if strg.startswith('fluxsour') or strg.startswith('sindsour'):
gmod.indxpara.specsour.append(valu)
if strg.startswith('fluxhost') or strg.startswith('sindhost'):
gmod.indxpara.spechost.append(valu)
if gmod.boollens or gmod.boolhost:
gmod.indxpara.host = gmod.indxpara.hostlght + gmod.indxpara.hostlens
gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr
## number of model spectral parameters for each population
#numbspep = np.empty(gmod.numbpopl, dtype=int)
#liststrgspep = [[] for l in range(gmod.numbpopl)]
#for l in gmod.indxpopl:
# if gdat.numbener > 1:
# liststrgspep[l] += ['sind']
# if gmod.spectype[l] == 'expc':
# liststrgspep[l] += ['expc']
# if gmod.spectype[l] == 'curv':
# liststrgspep[l] = ['curv']
# numbspep[l] = len(liststrgspep[l])
def setp_paragenrscalbase(gdat, strgmodl='fitt'):
'''
Setup labels and scales for base parameters
'''
print('setp_paragenrscalbase(): Building the %s model base parameter names and scales...' % strgmodl)
gmod = getattr(gdat, strgmodl)
listlablback = []
for nameback in gmod.listnameback:
if nameback == 'isot':
listlablback.append('Isotropic')
listlablback.append(r'$\mathcal{I}$')
if nameback == 'fdfm':
listlablback.append('FDM')
listlablback.append(r'$\mathcal{D}$')
if nameback == 'dark':
listlablback.append('NFW')
listlablback.append(r'$\mathcal{D}_{dark}$')
if nameback == 'part':
listlablback.append('Particle Back.')
listlablback.append(r'$\mathcal{I}_p$')
# background templates
listlablsbrt = deepcopy(listlablback)
numblablsbrt = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
listlablsbrt.append(gmod.lablpopl[l])
listlablsbrt.append(gmod.lablpopl[l] + ' subt')
numblablsbrt += 2
if gmod.boollens:
listlablsbrt.append('Source')
numblablsbrt += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listlablsbrt.append('Host %d' % e)
numblablsbrt += 1
if gmod.numbpopl > 0:
if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:
listlablsbrt.append('Uniform')
numblablsbrt += 1
listlablsbrtspec = ['Data']
listlablsbrtspec += deepcopy(listlablsbrt)
if len(listlablsbrt) > 1:
listlablsbrtspec.append('Total Model')
numblablsbrtspec = len(listlablsbrtspec)
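# Summary: listlablsbrtspec prepends 'Data' to the emission-template labels built above
# and appends 'Total Model' whenever more than one emission component is present.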
# number of generative parameters per element, depends on population
#numbparaelem = gmod.numbparagenrelem + numbparaelemderi
# maximum total number of parameters
#numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem
#numbparaelemkind = gmod.numbparagenrbase
#for l in gmod.indxpopl:
# numbparaelemkind += gmod.numbparagenrelemsing[l]
#nameparagenrbase
#gmod.namepara.genrelem
#listnameparaderifixd
#listnameparaderielem
#gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem
#listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem
gmod.listindxparakindscal = {}
for scaltype in gdat.listscaltype:
gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]
#
## stack
## gmod.listnameparastck
#gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#
#gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase
#gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase
#for k in range(gmod.numbparaelem):
# for l in gmod.indxpopl:
# if k >= gmod.numbparagenrelemcuml[l]:
# indxpopltemp = l
# indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]
# gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]
# break
# gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)
# gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]
#
#
#if np.where(gmod.listscalpara == 0)[0].size > 0:
# print('gmod.listscalpara[gmod.indxparagenrbase]')
# print(gmod.listscalpara[gmod.indxparagenrbase])
# raise Exception('')
#
## labels and scales for variables
if gmod.boollens:
setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhdelt', r'$\rho_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhdeltbein', r'$\rho_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'fracsubhintg', r'f_{\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhdelt', r'f_{\rho,\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\rho,\rm{sub,E}}$')
for e in gmod.indxsersfgrd:
setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\rm{hst,%d,C}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\rm{hst,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\rm{hst,%d}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\rm{hst,E,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\rm{hst,E,%d}}$' % e)
for namevarb in ['fracsubh', 'masssubh']:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')
for e in gmod.indxsersfgrd:
setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')
# scalar variable setup
gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'
gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'
gdat.lablbooldfncsubt = 'H'
gdat.lablpriofactdoff = r'$\alpha_{p}$'
gmod.scalpriofactdoff = 'self'
gdat.minmreds = 0.
gdat.maxmreds = 1.5
gdat.minmmagt = 19.
gdat.maxmmagt = 28.
gmod.scalpara.numbelem = 'logt'
gmod.scalpara.lliktotl = 'logt'
gdat.lablener = 'E'
#gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)
# width of the Gaussian clusters
gdat.lablgwdt = r'\sigma_G'
gdat.lablgang = r'\theta'
gdat.lablaang = r'\phi'
gdat.labllgalunit = gdat.lablgangunit
gdat.lablbgalunit = gdat.lablgangunit
gdat.lablanglfromhost = r'\theta_{\rm{0,hst}}'
gdat.lablanglfromhostunit = gdat.lablgangunit
gdat.labldefs = r'\alpha_s'
gdat.lablflux = 'f'
gdat.lablnobj = 'p'
gdat.lablelin = r'\mathcal{E}'
gdat.lablsbrt = r'\Sigma'
gdat.labldeflprof = r'\alpha_a'
gdat.labldeflprofunit = u'$^{\prime\prime}$'
gdat.strgenerkevv = 'keV'
gdat.strgenergevv = 'GeV'
gdat.strgenerergs = 'erg'
gdat.strgenerimum = '\mu m^{-1}'
gdat.labldefsunit = u'$^{\prime\prime}$'
gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'
### labels for derived fixed dimensional parameters
if gdat.boolbinsener:
for i in gdat.indxener:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)
else:
gmod.lablrootpara.fracsdenmeandarkdfncsubt = 'f_{D/ST}'
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubt', 'f_{D/ST}')
### labels for background units
if gdat.typeexpr == 'ferm':
for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:
for labltemptemp in ['flux', 'sbrt']:
# define the label
if nameenerscaltype == 'en00':
strgenerscal = '%s' % labltemptemp
if nameenerscaltype == 'en01':
strgenerscal = 'E%s' % labltemptemp
if nameenerscaltype == 'en02':
strgenerscal = 'E^2%s' % labltemptemp
if nameenerscaltype == 'en03':
strgenerscal = '%s' % labltemptemp
labl = '%s' % strgenerscal
for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:
strgenerunit = getattr(gdat, 'strgener' + nameenerunit)
if nameenerscaltype == 'en00':
strgenerscalunit = '%s$^{-1}$' % strgenerunit
if nameenerscaltype == 'en01':
strgenerscalunit = ''
if nameenerscaltype == 'en02':
strgenerscalunit = '%s' % strgenerunit
if nameenerscaltype == 'en03':
strgenerscalunit = '%s' % strgenerunit
# define the label unit
for namesoldunit in ['ster', 'degr']:
if labltemptemp == 'flux':
lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'flux' + nameenerscaltype + nameenerunit + 'unit', lablunit)
else:
if namesoldunit == 'ster':
lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)
if namesoldunit == 'degr':
lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)
if gdat.boolbinsener:
gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')
gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')
gdat.lablexpo = r'$\epsilon$'
gdat.lablexpounit = 'cm$^2$ s'
gdat.lablprvl = '$p$'
gdat.lablreds = 'z'
gdat.lablmagt = 'm_R'
gdat.lablper0 = 'P_0'
gmod.scalper0plot = 'logt'
gdat.labldglc = 'd_{gc}'
gmod.scaldglcplot = 'logt'
gdat.labldlos = 'd_{los}'
gmod.scaldlosplot = 'logt'
if gdat.typeexpr == 'ferm':
gdat.labldlosunit = 'kpc'
gdat.labllumi = r'L_{\gamma}'
if gdat.typeexpr == 'chan':
gdat.labldlosunit = 'Mpc'
gdat.labllumi = r'L_{X}'
gdat.labllum0 = r'L_{X, 0}'
gdat.lablgeff = r'\eta_{\gamma}'
gmod.scalgeffplot = 'logt'
gmod.scallumiplot = 'logt'
gdat.labllumiunit = 'erg s$^{-1}$'
gdat.labllum0unit = 'erg s$^{-1}$'
gdat.lablthet = r'\theta_{gc}'
gmod.scalthetplot = 'self'
gdat.lablphii = r'\phi_{gc}'
gmod.scalphiiplot = 'self'
setattr(gmod.lablrootpara, 'magf', 'B')
setattr(gdat, 'scalmagfplot', 'logt')
setattr(gmod.lablrootpara, 'per1', 'P_1')
if gdat.typedata == 'inpt':
gdat.minmpara.per0 = 1e-3
gdat.maxmpara.per0 = 1e1
gdat.minmpara.per1 = 1e-20
gdat.maxmpara.per1 = 1e-10
gdat.minmpara.flux0400 = 1e-1
gdat.maxmpara.flux0400 = 1e4
setattr(gdat, 'scalper1plot', 'logt')
setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')
setattr(gdat, 'scalflux0400plot', 'logt')
for q in gdat.indxrefr:
setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], '\Delta_{%d}' % q)
gdat.lablsigm = '\sigma_l'
gdat.lablgamm = '\gamma_l'
gdat.lablbcom = '\eta'
gdat.lablinfopost = 'D_{KL}'
gdat.lablinfopostunit = 'nat'
gdat.lablinfoprio = 'D_{KL,pr}'
gdat.lablinfopriounit = 'nat'
gdat.labllevipost = '\ln P(D)'
gdat.labllevipostunit = 'nat'
gdat.lablleviprio = '\ln P_{pr}(D)'
gdat.labllevipriounit = 'nat'
gdat.lablsind = 's'
if gdat.boolbinsener:
for i in gdat.indxenerinde:
setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)
gdat.lablexpcunit = gdat.strgenerunit
gdat.labllliktotl = r'\ln P(D|M)'
gdat.labllpripena = r'\ln P(N)'
gdat.lablasca = r'\theta_s'
gdat.lablascaunit = gdat.lablgangunit
gdat.lablacut = r'\theta_c'
gdat.lablacutunit = gdat.lablgangunit
gdat.lablmcut = r'M_{c,n}'
gdat.lablmcutunit = r'$M_{\odot}$'
gdat.lablmcutcorr = r'\bar{M}_{c,n}'
gdat.lablmcutcorrunit = r'$M_{\odot}$'
gdat.lablspec = gdat.lablflux
gdat.lablspecunit = gdat.lablfluxunit
gdat.lablspecplot = gdat.lablflux
gdat.lablspecplotunit = gdat.lablfluxunit
gdat.lablcnts = 'C'
gdat.labldeltllik = r'\Delta_n \ln P(D|M)'
gdat.labldiss = r'\theta_{sa}'
gdat.labldissunit = gdat.lablgangunit
gdat.lablrele = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_l| \rangle'
gdat.lablrelc = r'\langle\vec{\alpha}_n \cdot \vec{\nabla} k_l \rangle'
gdat.lablreld = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_d| \rangle'
gdat.lablreln = r'\langle \Delta \theta_{pix} |\hat{\alpha}_n \cdot \vec{\nabla} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelm = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelk = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelf = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle / k_m'
for q in gdat.indxrefr:
for l in gmod.indxpopl:
setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))
setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.minmspec = 1e-2
gdat.maxmspec = 1e1
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
if gdat.typeexpr == 'ferm':
gdat.minmlumi = 1e32
gdat.maxmlumi = 1e36
elif gdat.typeexpr == 'chan':
if gdat.typedata == 'inpt':
gdat.minmlum0 = 1e42
gdat.maxmlum0 = 1e46
gdat.minmlumi = 1e41
gdat.maxmlumi = 1e45
try:
gdat.minmdlos
except:
if gdat.typeexpr == 'chan':
gdat.minmdlos = 1e7
gdat.maxmdlos = 1e9
else:
gdat.minmdlos = 6e3
gdat.maxmdlos = 1.1e4
if gdat.typeexpr == 'ferm':
gdat.minmcnts = 1e1
gdat.maxmcnts = 1e5
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
gdat.minmcnts = 1e4
gdat.maxmcnts = 1e8
else:
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'hubb':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'fire':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
gdat.minmspecplot = gdat.minmspec
gdat.maxmspecplot = gdat.maxmspec
gdat.minmdeltllik = 1.
gdat.maxmdeltllik = 1e3
gdat.minmdiss = 0.
gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)
gdat.minmrele = 1e-3
gdat.maxmrele = 1e1
gdat.minmreln = 1e-3
gdat.maxmreln = 1.
gdat.minmrelk = 1e-3
gdat.maxmrelk = 1.
gdat.minmrelf = 1e-5
gdat.maxmrelf = 1e-1
gdat.minmrelm = 1e-3
gdat.maxmrelm = 1e1
gdat.minmreld = 1e-3
gdat.maxmreld = 1e1
gdat.minmrelc = 1e-3
gdat.maxmrelc = 1.
gdat.minmmcut = 3e7
gdat.maxmmcut = 2e9
gdat.minmmcutcorr = gdat.minmmcut
gdat.maxmmcutcorr = gdat.maxmmcut
if gdat.boolbinsspat:
gdat.minmbein = 0.
gdat.maxmbein = 1. / gdat.anglfact
# scalar variables
if gdat.boolbinsspat:
gdat.minmdeflprof = 1e-3 / gdat.anglfact
gdat.maxmdeflprof = 0.1 / gdat.anglfact
#gdat.minmfracsubh = 0.
#gdat.maxmfracsubh = 0.3
#gmod.scalfracsubh = 'self'
#gdat.minmmasshost = 1e10
#gdat.maxmmasshost = 1e13
#gmod.scalmasshost = 'self'
#
#gdat.minmmasssubh = 1e8
#gdat.maxmmasssubh = 1e10
#gmod.scalmasssubh = 'self'
# collect groups of parameter indices into lists
## labels and scales for base parameters
gmod.nameparagenrbase = []
for name, k in gmod.indxpara.__dict__.items():
if not np.isscalar(k):
print('name')
print(name)
print('temp: no nonscalar should be here!')
continue
gmod.nameparagenrbase.append(name)
gmod.numbparagenrbase = len(gmod.nameparagenrbase)
gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)
gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]
## list of scalar variable names
gmod.namepara.scal = list(gmod.nameparagenrbase)
gmod.namepara.scal += ['lliktotl']
# derived parameters
print('Determining the list of derived, fixed-dimensional parameter names...')
gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]
gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]
gmod.namepara.genrelemflat = []
gmod.namepara.derielemflat = []
gmod.namepara.genrelemextdflat = []
gmod.namepara.derielemextdflat = []
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)
gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])
for k in gmod.indxparaderielemsing[l]:
gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)
gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])
# list of element parameter names (derived and generative), counting label-degenerate element parameters only once
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])
gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])
gmod.namepara.elemflat = []
for l in gmod.indxpopl:
gmod.namepara.elemflat.extend(gmod.namepara.elem[l])
gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)
if gmod.boolelemlghtanyy:
for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:
if not strgfeat in gmod.namepara.genrelemdefa:
gmod.namepara.genrelemdefa.append(strgfeat)
# list of flattened generative element parameter names, counting label-degenerate element parameters only once
gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)
#gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)
gmod.inxparagenrscalelemkind = tdpy.gdatstrt()
gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)
gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)
# list of parameter names (derived and generative), counting label-degenerate element parameters only once, element lists flattened
gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparakind = len(gmod.namepara.kind)
gmod.indxparakind = np.arange(gmod.numbparakind)
# list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened
gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat
gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)
gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)
gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)
# list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened
gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat
gmod.numbparagenr = len(gmod.listnameparagenrscal)
gmod.indxparagenr = np.arange(gmod.numbparagenr)
# list of parameter names (derived and generative), element lists flattened
gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \
gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)
for e in gmod.indxsersfgrd:
gmod.namepara.scal += ['masshostisf%dbein' % e]
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masshostisf%d' % e + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
gmod.namepara.scal += ['lpripena']
if False and gmod.boolelemsbrtdfncanyy:
for strgbins in ['lowr', 'higr']:
gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]
gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]
for i in gdat.indxener:
gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]
gmod.namepara.scal += ['booldfncsubt']
if gmod.numbparaelem > 0:
for q in gdat.indxrefr:
if gdat.boolasscrefr[q]:
for l in gmod.indxpopl:
gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]
gmod.namepara.scal += ['fdispop%dpop%d' % (q, l)]
gmod.numbvarbscal = len(gmod.namepara.scal)
gmod.indxvarbscal = np.arange(gmod.numbvarbscal)
# determine total label
gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.listnameparaglob += ['cntpmodl']
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
if not gmod.namepara.genrelem[l][g] in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])
gmod.listnameparaglob.append(gmod.namepara.derielem[l][g])
for name in gmod.listnameparaglob:
lablroot = getattr(gmod.lablrootpara, name)
lablunit = getattr(gmod.lablunitpara, name)
labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
setattr(gmod.labltotlpara, name, labltotl)
# define fact
for l in gmod.indxpopl:
for k in gmod.indxparakind:
name = gmod.namepara.kind[k]
scal = getattr(gmod.scalpara, name)
if scal == 'self' or scal == 'logt':
minm = getattr(gmod.minmpara, name)
maxm = getattr(gmod.maxmpara, name)
if scal == 'self':
fact = maxm - minm
if scal == 'logt':
fact = np.log(maxm / minm)
if fact == 0:
print('name')
print(name)
raise Exception('')
setattr(gmod.factpara, name, fact)
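# Illustration: for a uniformly distributed ('self') parameter, fact is the prior width
# maxm - minm; for a log-uniform ('logt') parameter it is ln(maxm / minm), e.g. about 4.6
# for minm = 1 and maxm = 100. This factor is presumably what maps parameters to and
# from the unit interval elsewhere in the code.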
if gmod.numbparaelem > 0:
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1
else:
gmod.indxparagenrfulleleminit = -1
## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)
for featpara in gdat.listfeatparalist:
gmodfeat = getattr(gmod, featpara + 'para')
### elements
#for strgtypepara in gdat.liststrgtypepara:
# listname = getattr(gmod.namepara, strgtypepara + 'elem')
# listfeat = [[] for l in gmod.indxpopl]
# listfeatflat = []
# for l in gmod.indxpopl:
#
# numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]
# listfeat[l] = [[] for k in range(numb)]
# for k in range(numb):
# scal = getattr(gmod.scalpara, listname[l][k])
# if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):
# continue
# if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):
# continue
# if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):
# continue
#
# if strgtypepara == 'genr':
# strgextn = 'pop%d' % l
# else:
# strgextn = ''
# print('featpara')
# print(featpara)
# print('listname')
# print(listname)
# listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)
# listfeatflat.append(listfeat[l][k])
# setattr(gmodfeat, strgtypepara + 'elem', listfeat)
# setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)
### groups of parameters inside the parameter vector
### 'base': all fixed-dimensional generative parameters
### 'full': all generative parameters
for strggroppara in ['base', 'full']:
indx = getattr(gmod, 'indxparagenr' + strggroppara)
feat = [0. for k in indx]
for attr, valu in gmod.indxpara.__dict__.items():
if not np.isscalar(valu):
continue
scal = getattr(gmod.scalpara, attr)
if not (scal == 'self' or scal == 'logt') and featpara == 'fact':
continue
if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):
print('Mean or Std for non-Gaussian')
continue
if featpara == 'name':
feat[valu] = attr
else:
feat[valu] = getattr(gmodfeat, attr)
feat = np.array(feat)
setattr(gmodfeat, 'genr' + strggroppara, feat)
#print('gmod.minmpara')
#for attr, varb in gmod.minmpara.__dict__.items():
# print(attr, varb)
#print('gmod.maxmpara')
#for attr, varb in gmod.maxmpara.__dict__.items():
# print(attr, varb)
#print('gmod.scalpara')
#for attr, varb in gmod.scalpara.__dict__.items():
# print(attr, varb)
#raise Exception('')
## population groups
### number of elements
for strgvarb in ['numbelem', 'meanelem']:
listindxpara = []
if strgmodl == 'true':
listpara = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith(strgvarb + 'p'):
listindxpara.append(valu)
if strgmodl == 'true':
listpara.append(getattr(gmod.this, strg))
listindxpara = np.array(listindxpara)
setattr(gmod.indxpara, strgvarb, listindxpara)
if strgmodl == 'true':
listpara = np.array(listpara)
setattr(gmod, strgvarb, listpara)
### parameters of priors for element parameters
gmod.indxpara.prioelem = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg == 'dist' and np.isscalar(valu):
gmod.indxpara.prioelem.append(valu)
gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem)
### hyperparameters
if gmod.typemodltran == 'pois':
gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))
else:
gmod.indxpara.hypr = gmod.indxpara.prioelem
## generative base parameter indices for each scaling
gmod.listindxparagenrbasescal = dict()
for scaltype in gdat.listscaltype:
gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]
if gdat.booldiagmode:
if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:
raise Exception('')
def plot_lens(gdat):
if gmod.boolelemdeflsubh:
xdat = gdat.binspara.angl[1:] * gdat.anglfact
lablxdat = gdat.labltotlpara.gang
listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact
listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact
listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact
listasym = [False, False, False]
listydat = []
for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):
listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)
for scalxdat in ['self', 'logt']:
path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \
lablydat=r'$\alpha_n$ [$^{\prime\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])
# pixel-convolution of the Sersic profile
# temp -- y axis labels are wrong, should be per solid angle
xdat = gdat.binspara.lgalsers * gdat.anglfact
for n in range(gdat.numbindxsers + 1):
for k in range(gdat.numbhalfsers + 1):
if k != 5:
continue
path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
#path = gdat.pathinitintr + 'sersprofcntr%04d%04d.pdf' % (n, k)
#tdpy.plot_gene(path, xdat, gdat.sersprofcntr[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
xdat = gdat.binspara.angl * gdat.anglfact
listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact
listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact
listindx = np.array([4., 2., 4., 10.])
listydat = []
listlabl = []
for spec, size, indx in zip(listspec, listsize, listindx):
listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))
listlabl.append('$R_e = %.3g ^{\prime\prime}, n = %.2g$' % (size * gdat.anglfact, indx))
path = gdat.pathinitintr + 'sersprof.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \
listlegd=listlabl, listhlin=1e-7, limtydat=[1e-8, 1e0])
minmredshost = 0.01
maxmredshost = 0.4
minmredssour = 0.01
maxmredssour = 2.
numbreds = 200
retr_axis(gdat, 'redshost')
retr_axis(gdat, 'redssour')
gdat.meanpara.adishost = np.empty(numbreds)
for k in range(numbreds):
gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])
asca = 0.1 / gdat.anglfact
acut = 1. / gdat.anglfact
minmmass = np.zeros((numbreds + 1, numbreds + 1))
maxmmass = np.zeros((numbreds + 1, numbreds + 1))
for k, redshost in enumerate(gdat.binspara.redshost):
for n, redssour in enumerate(gdat.binspara.redssour):
if redssour > redshost:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)
maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)
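# Summary: minmmass and maxmmass tabulate, over the (host redshift, source redshift)
# grid, the log10 of the subhalo cutoff mass implied by the minimum and maximum
# deflection amplitudes, using the deflection-to-mass conversion for each lens geometry.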
#valulevl = np.linspace(7.5, 9., 5)
valulevl = [7.0, 7.3, 7.7, 8., 8.6]
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, 10, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')
axis.set_xlabel(r'$z_{\rm{hst}}$')
axis.set_ylabel(r'$z_{\rm{src}}$')
axis.set_title(r'$M_{c,min}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsminm.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
valulevl = np.linspace(9., 11., 20)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, 10, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')
axis.set_xlabel('$z_{hst}$')
axis.set_ylabel('$z_{src}$')
axis.set_title(r'$M_{c,max}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsmaxm.pdf'
plt.colorbar(imag)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. * gdat.maxmgangdata * 1e-3)
axis.set_xlabel('$z_h$')
axis.set_yscale('log')
axis.set_ylabel(r'$\lambda$ [kpc]')
path = gdat.pathinitintr + 'wlenreds.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
fracacutasca = np.logspace(-1., 2., 20)
mcut = retr_mcutfrommscl(fracacutasca)
axis.loglog(fracacutasca, mcut)
axis.set_xlabel(r'$\tau_n$')
axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')
axis.axhline(1., ls='--')
path = gdat.pathinitintr + 'mcut.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
def retr_listrtagprev(strgcnfg, pathpcat):
# list of PCAT run plot outputs
pathimag = pathpcat + '/imag/'
listrtag = fnmatch.filter(os.listdir(pathimag), '2*')
listrtagprev = []
for rtag in listrtag:
strgstat = pathpcat + '/data/outp/' + rtag
if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and strgcnfg + '_' + rtag[16:].split('_')[-1] == rtag[16:]:
listrtagprev.append(rtag)
listrtagprev.sort()
return listrtagprev
def make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):
hand, labl = axis.get_legend_handles_labels()
legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)
legd.get_frame().set_fill(True)
legd.get_frame().set_facecolor('white')
def setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):
if popl == 'full':
indxpopltemp = gmod.indxpopl
elif popl != 'none':
indxpopltemp = [popl]
if ener == 'full':
indxenertemp = gdat.indxener
elif ener != 'none':
indxenertemp = [ener]
if evtt == 'full':
indxevtttemp = gdat.indxevtt
elif evtt != 'none':
indxevtttemp = [evtt]
if back == 'full':
gmod.indxbacktemp = gmod.indxback
elif isinstance(back, int):
gmod.indxbacktemp = np.array([back])
# handle the Sersic component index analogously (indxisfrtemp is otherwise undefined below)
if isfr == 'full':
indxisfrtemp = gmod.indxsersfgrd
elif isfr != 'none':
indxisfrtemp = [isfr]
liststrgvarb = []
if iele != 'none':
for l in gmod.indxpopl:
if iele == 'full':
listiele = np.arange(gmod.maxmpara.numbelem[l])
else:
listiele = [iele]
for k in listiele:
liststrgvarb.append(strgvarb + 'pop%d%04d' % (l, k))
if popl != 'none' and ener == 'none' and evtt == 'none' and back == 'none' and iele == 'none':
for l in indxpopltemp:
liststrgvarb.append(strgvarb + 'pop%d' % l)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr != 'none':
for e in indxisfrtemp:
liststrgvarb.append(strgvarb + 'isf%d' % e)
if popl == 'none' and ener != 'none' and evtt != 'none' and back == 'none':
for i in indxenertemp:
for m in indxevtttemp:
liststrgvarb.append(strgvarb + 'en%02devt%d' % (i, m))
if popl == 'none' and ener != 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'back%04den%02d' % (c, i))
if popl == 'none' and ener == 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
liststrgvarb.append(strgvarb + 'back%04d' % c)
if popl == 'none' and ener != 'none' and evtt == 'none' and back == 'none':
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'en%02d' % i)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr == 'none':
liststrgvarb.append(strgvarb)
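# Usage sketch (assuming three energy bins): setp_namevarbsing(gdat, gmod, strgmodl,
# 'bacp', popl='none', ener='full', evtt='none', back=0, isfr='none', iele='none')
# would return ['bacpback0000en00', 'bacpback0000en01', 'bacpback0000en02'].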
if gdat.booldiagmode:
for strgvarb in liststrgvarb:
if liststrgvarb.count(strgvarb) != 1:
print('liststrgvarb')
print(liststrgvarb)
print('popl')
print(popl)
print('ener')
print(ener)
print('evtt')
print(evtt)
print('back')
print(back)
print('isfr')
print(isfr)
print('iele')
print(iele)
raise Exception('')
return liststrgvarb
def setp_varb(gdat, strgvarbbase, valu=None, minm=None, maxm=None, scal='self', lablroot=None, lablunit='', mean=None, stdv=None, cmap=None, numbbins=10, \
popl='none', ener='none', evtt='none', back='none', isfr='none', iele='none', \
boolinvr=False, \
strgmodl=None, strgstat=None, \
):
'''
Set up variable values across all models (true and fitting) as well as all populations, energy bins,
event bins, background components, and Sersic components
'''
# determine the list of models
if strgmodl is None:
if gdat.typedata == 'mock':
liststrgmodl = ['true', 'fitt', 'plot']
else:
liststrgmodl = ['fitt', 'plot']
else:
if strgmodl == 'true' or strgmodl == 'plot' or strgmodl == 'refr':
liststrgmodl = [strgmodl]
else:
liststrgmodl = ['fitt', 'plot']
print('liststrgmodl')
print(liststrgmodl)
for strgmodl in liststrgmodl:
if strgmodl == 'plot':
gmod = gdat.fitt
gmodoutp = gdat
else:
gmod = getattr(gdat, strgmodl)
gmodoutp = gmod
# get the list of names of the variable
liststrgvarbnone = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, 'none')
if iele != 'none':
liststrgvarb = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, iele)
else:
liststrgvarb = liststrgvarbnone
# set the values of each variable in the list
for strgvarb in liststrgvarb:
if minm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.minmpara, strgvarb, minm)
if maxm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.maxmpara, strgvarb, maxm)
if mean is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, mean)
if stdv is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.stdvpara, strgvarb, stdv)
if valu is not None:
if strgstat is None:
print('strgvarb')
print(strgvarb)
print('strgmodl')
print(strgmodl)
print('valu')
print(valu)
print('')
setp_varbcore(gdat, strgmodl, gmodoutp, strgvarb, valu)
elif strgstat == 'this':
setp_varbcore(gdat, strgmodl, gmodoutp.this, strgvarb, valu)
if scal is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.scalpara, strgvarb, scal)
if lablroot is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablrootpara, strgvarb, lablroot)
if lablunit is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablunitpara, strgvarb, lablunit)
if cmap is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.cmappara, strgvarb, cmap)
setp_varbcore(gdat, strgmodl, gmodoutp.numbbinspara, strgvarb, numbbins)
# create limt, bins, mean, and delt
if minm is not None and maxm is not None or mean is not None and stdv is not None:
# determine minima and maxima for Gaussian or log-Gaussian distributed parameters
if mean is not None:
minm = mean - gdat.numbstdvgaus * stdv
maxm = mean + gdat.numbstdvgaus * stdv
# uniformly-distributed
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsunif = np.linspace(minm, maxm, numbbins + 1)
if scal == 'logt' or scal == 'powr':
binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)
if gdat.booldiagmode:
if minm <= 0.:
raise Exception('')
if scal == 'asnh':
binsunif = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numbbins + 1)
if boolinvr:
binsunif = binsunif[::-1]
meanparaunif = (binsunif[1:] + binsunif[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanpara = meanparaunif
bins = binsunif
minmunif = minm
maxmunif = maxm
if scal == 'logt' or scal == 'powr':
meanpara = 10**meanparaunif
bins = 10**binsunif
minmunif = np.log10(minm)
maxmunif = np.log10(maxm)
if scal == 'asnh':
meanpara = np.sinh(meanparaunif)
bins = np.sinh(binsunif)
minmunif = np.arcsinh(minm)
maxmunif = np.arcsinh(maxm)
delt = np.diff(bins)
limt = np.array([minm, maxm])
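# Worked example (scal='logt', minm=1., maxm=100., numbbins=10): binsunif spans [0, 2]
# in log10, bins holds the 11 log-spaced edges 10**binsunif, meanpara the geometric bin
# centers, delt the bin widths, and limt = [1., 100.].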
# 'self' is not yet defined
if scal == 'asnh' or scal == 'logt' or scal == 'powr':
listvalutickmajr, listlabltickmajr, listvalutickminr, listlabltickminr = tdpy.retr_valulabltick(minm, maxm, scal)
setattr(gmodoutp.labltickmajrpara, strgvarb, listlabltickmajr)
setattr(gmodoutp.valutickmajrpara, strgvarb, listvalutickmajr)
setattr(gmodoutp.labltickminrpara, strgvarb, listlabltickminr)
setattr(gmodoutp.valutickminrpara, strgvarb, listvalutickminr)
#labltick = np.empty(gdat.numbtickcbar, dtype=object)
#for k in range(gdat.numbtickcbar):
# if scal == 'asnh':
# valutick[k] = np.sinh(tickunif[k])
# if scal == 'logt' or scal == 'powr':
# valutick[k] = 10**(tickunif[k])
# # avoid very small, but nonzero central values in the residual count color maps
# if strgcbar == 'cntpresi' and np.fabs(valutick[k]) < 1e-5:
# valutick[k] = 0.
# if strgcbar == 'cntpdata' and np.amax(valutick) > 1e3:
# labltick[k] = '%d' % valutick[k]
# else:
# labltick[k] = '%.3g' % valutick[k]
setattr(gmodoutp.limtpara, strgvarb, limt)
setattr(gmodoutp.binspara, strgvarb, bins)
setattr(gmodoutp.meanpara, strgvarb, meanpara)
setattr(gmodoutp.deltpara, strgvarb, delt)
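# A minimal, standalone sketch (not part of PCAT) of the 'logt' binning branch above: bin
# edges are placed uniformly in log10 space and then mapped back to linear space. The
# function name and arguments below are hypothetical, introduced only for illustration.
def retr_binslogtexam(minm, maxm, numbbins):

    # bin edges, uniform in log space
    binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)
    bins = 10**binsunif

    # bin centers (geometric means) and widths in linear space
    meanpara = 10**((binsunif[1:] + binsunif[:-1]) / 2.)
    delt = np.diff(bins)

    return bins, meanpara, delt

# Example: retr_binslogtexam(1e-2, 1e2, 4)[0] gives the edges [0.01, 0.1, 1., 10., 100.].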
def retr_ticklabltemp(gdat, strgcbar):
minm = getattr(gdat.minmpara, strgcbar)
maxm = getattr(gdat.maxmpara, strgcbar)
scal = getattr(gdat.scalpara, strgcbar)
numb = gdat.numbtickcbar - 1
retr_axis(gdat, strgcbar, numb=numb)
minmscal = minm
if scal == 'asnh':
minmscal = np.arcsinh(minmscal)
if scal == 'logt':
minmscal = np.log10(minmscal)
maxmscal = maxm
if scal == 'asnh':
maxmscal = np.arcsinh(maxmscal)
if scal == 'logt':
maxmscal = np.log10(maxmscal)
tickscal = np.linspace(minmscal, maxmscal, gdat.numbtickcbar)
labl = np.empty(gdat.numbtickcbar, dtype=object)
tick = np.copy(tickscal)
for k in range(gdat.numbtickcbar):
if scal == 'asnh':
tick[k] = np.sinh(tickscal[k])
elif scal == 'logt':
tick[k] = 10**(tickscal[k])
# avoid very small, but nonzero central values in the residual count color maps
if strgcbar == 'cntpresi' and np.fabs(tick[k]) < 1e-5:
tick[k] = 0.
if strgcbar == 'cntpdata' and np.amax(tick) > 1e3:
labl[k] = '%d' % tick[k]
else:
labl[k] = '%.3g' % tick[k]
setattr(gdat.tickpara, strgcbar, tick)
def retr_axistemp(gdat, strgvarb, strgmodl=None, boolinvr=False):
if strgmodl is None:
listgdattemp = [gdat]
for strgmodl in gdat.liststrgmodl:
listgdattemp.append(getattr(gdat, strgmodl))
elif strgmodl == 'fitt' or strgmodl == 'true':
listgdattemp = [getattr(gdat, strgmodl)]
elif strgmodl == 'allm':
listgdattemp = []
for strgmodl in gdat.liststrgmodl:
listgdattemp.append(getattr(gdat, strgmodl))
for gdattemp in listgdattemp:
minm = getattr(gdattemp.minmpara, strgvarb)
maxm = getattr(gdattemp.maxmpara, strgvarb)
numb = getattr(gdattemp.numbbinspara, strgvarb)
scal = getattr(gdattemp.scalpara, strgvarb)
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsscal = np.linspace(minm, maxm, numb + 1)
if scal == 'logt':
print('minm')
print(minm)
print('maxm')
print(maxm)
print('strgvarb')
print(strgvarb)
binsscal = np.linspace(np.log10(minm), np.log10(maxm), numb + 1)
print('')
if gdat.booldiagmode:
if minm <= 0.:
raise Exception('')
if scal == 'asnh':
binsscal = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numb + 1)
if boolinvr:
binsscal = binsscal[::-1]
meanvarbscal = (binsscal[1:] + binsscal[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanvarb = meanvarbscal
bins = binsscal
if scal == 'logt':
meanvarb = 10**meanvarbscal
bins = 10**binsscal
if scal == 'asnh':
meanvarb = np.sinh(meanvarbscal)
bins = np.sinh(binsscal)
delt = np.diff(bins)
limt = np.array([np.amin(bins), np.amax(bins)])
setattr(gdattemp.limtpara, strgvarb, limt)
setattr(gdattemp.binspara, strgvarb, bins)
setattr(gdattemp.meanpara, strgvarb, meanvarb)
setattr(gdattemp.deltpara, strgvarb, delt)
def setp_varbcore(gdat, strgmodl, gdattemp, strgvarbtemp, valu):
# check if the variable is defined by the user
try:
valutemp = getattr(gdattemp, strgvarbtemp)
if valutemp is None:
raise
if gdat.typeverb > 0:
print('Received custom value for %s, %s: %s' % (strgvarbtemp, strgmodl, valutemp))
# if not defined or defined as None, define it
except:
setattr(gdattemp, strgvarbtemp, valu)
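# A minimal usage sketch (not part of PCAT) for setp_varbcore above: an attribute is only
# overwritten when it is missing or None, so user-supplied values take precedence over
# defaults. The container and attribute names below are hypothetical.
def exam_setp_varbcore():

    from types import SimpleNamespace

    gdatexam = SimpleNamespace(typeverb=0)
    gdattemp = SimpleNamespace(minmfluxexam=None)

    # the attribute is None, so the default 1e-9 is adopted
    setp_varbcore(gdatexam, 'fitt', gdattemp, 'minmfluxexam', 1e-9)

    # the attribute is now defined, so the second call leaves it unchanged
    setp_varbcore(gdatexam, 'fitt', gdattemp, 'minmfluxexam', 1e-7)

    return gdattemp.minmfluxexam # -> 1e-09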
def intp_sinc(gdat, lgal, bgal):
intpsinc = 4. * gdat.numbsidepsfn**2 * np.sum(gdat.temppsfn * sinc(gdat.numbsidepsfn * (gdat.gridpsfnlgal + lgal) - gdat.gridpsfnlgal) * \
sinc(gdat.numbsidepsfn * (gdat.gridpsfnbgal + bgal) - gdat.gridpsfnbgal))
return intpsinc
def retr_fluxbrgt(gdat, lgal, bgal, flux):
if lgal.size == 0:
fluxbrgt = np.array([0.])
fluxbrgtassc = np.array([0.])
else:
indxbrgt = np.argmax(flux)
fluxbrgt = flux[indxbrgt]
# ensure the second return value is defined; without an association it defaults to the brightest flux
fluxbrgtassc = fluxbrgt
return fluxbrgt, fluxbrgtassc
def init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot):
figrsize = (gdat.sizeimag, gdat.sizeimag)
figr, axis = plt.subplots(figsize=figrsize)
nameplot = strgplot
if gdat.numbener > 1:
nameplot += 'en%02d' % gdat.indxenerincl[indxenerplot]
if gdat.numbevtt > 1:
if indxevttplot == -1:
nameplot += 'evtA'
else:
nameplot += 'evt%d' % gdat.indxevttincl[indxevttplot]
if gdat.fitt.numbpopl > 1:
if indxpoplplot == -1:
nameplot += 'popA'
else:
nameplot += 'pop%d' % indxpoplplot
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)
print('gdat.fitt.labltotlpara.lgalpop0')
print(gdat.fitt.labltotlpara.lgalpop0)
print('gdat.fitt.labltotlpara.bgalpop0')
print(gdat.fitt.labltotlpara.bgalpop0)
axis.set_xlabel(gdat.fitt.labltotlpara.lgalpop0)
axis.set_ylabel(gdat.fitt.labltotlpara.bgalpop0)
titl = ''
if indxenerplot is not None and gdat.numbener > 1 and strgplot.endswith('cnts'):
titl = gdat.strgener[indxenerplot]
if indxevttplot is not None and gdat.numbevtt > 1 and strgplot.endswith('cnts'):
titl += ' ' + gdat.strgevtt[indxevttplot]
axis.set_title(titl)
return figr, axis, path
def draw_frambndr(gdat, axis):
outr = max(gdat.frambndrmodl, gdat.frambndrdata)
axis.set_xlim([-outr, outr])
axis.set_ylim([-outr, outr])
innr = min(gdat.frambndrmodl, gdat.frambndrdata)
axis.axvline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axvline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
def retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot=None, indxevttplot=-1, booltdim=False, imag=None):
draw_frambndr(gdat, axis)
# take the relevant energy and PSF bins
if indxenerplot is not None:
if indxevttplot == -1:
maps = np.sum(maps[indxenerplot, ...], axis=1)
else:
maps = maps[indxenerplot, :, indxevttplot]
# project the map to 2D
if gdat.typepixl == 'heal':
maps = tdpy.retr_cart(maps, indxpixlrofi=gdat.indxpixlrofi, numbsideinpt=gdat.numbsideheal, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata)
if gdat.typepixl == 'cart':
shap = [gdat.numbsidecart] + list(maps.shape)
shap[1] = gdat.numbsidecart
shapflat = list(maps.shape)
shapflat[0] = gdat.numbpixlfull
mapstemp = np.zeros(shapflat)
if maps.size == gdat.indxpixlrofi.size:
mapstemp[gdat.indxpixlrofi, ...] = maps
else:
mapstemp[:, ...] = maps
maps = mapstemp.reshape(shap).swapaxes(0, 1)
# temp -- this is needed to bring the Fermi-LAT map to the right direction
#maps = fliplr(maps)
# rescale the map
if strgmodl is not None:
gmod = getattr(gdat, strgmodl)
else:
gmod = gdat
scal = getattr(gdat.scalpara, strgcbar)
cmap = getattr(gdat.cmappara, strgcbar)
vmin = getattr(gdat.minmpara, strgcbar)
vmax = getattr(gdat.maxmpara, strgcbar)
if scal == 'asnh':
maps = np.arcsinh(maps)
if scal == 'logt':
maps = np.log10(maps)
if imag is None:
imag = axis.imshow(maps, cmap=cmap, origin='lower', extent=gdat.exttrofi, interpolation='nearest', vmin=vmin, vmax=vmax, alpha=gdat.alphmaps)
return imag
else:
imag.set_data(maps)
def make_cbar(gdat, axis, imag, strgvarb):
# make a color bar
valutickmajr = getattr(gdat.valutickmajrpara, strgvarb)
labltickmajr = getattr(gdat.labltickmajrpara, strgvarb)
print('valutickmajr')
print(valutickmajr)
print('labltickmajr')
print(labltickmajr)
cbar = plt.colorbar(imag, ax=axis, fraction=0.05, aspect=15)
cbar.set_ticks(valutickmajr)
cbar.set_ticklabels(labltickmajr)
return cbar
def make_legdmaps(gdat, strgstat, strgmodl, axis, mosa=False, assc=False):
gmod = getattr(gdat, strgmodl)
# transdimensional elements
if strgmodl == 'fitt' and (strgstat == 'pdfn' and gdat.boolcondcatl or strgstat == 'this') and gmod.numbparaelem > 0:
for l in gmod.indxpopl:
colr = retr_colr(gdat, strgstat, strgmodl, l)
if strgstat == 'pdfn':
labl = 'Condensed %s %s' % (gmod.legd, gmod.legdpopl[l])
else:
labl = 'Sample %s %s' % (gmod.legd, gmod.legdpopl[l])
if not gmod.maxmpara.numbelem[l] == 0:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=labl, marker=gmod.listelemmrkr[l], lw=gdat.mrkrlinewdth, color=colr)
for q in gdat.indxrefr:
if not np.amax(gdat.refr.numbelem[q]) == 0:
if assc:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=gdat.refr.lablhits[q], marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
else:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablelem[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
# fixed-dimensional objects
if strgmodl == 'fitt':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gmod.lablmodl, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gmod.lablmodl, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gdat.typedata == 'mock':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gdat.refr.labl, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gdat.refr.labl, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
temphand, temp = axis.get_legend_handles_labels()
numblabl = len(temp)
if numblabl == 4:
numbcols = 2
else:
numbcols = 3
if mosa:
axis.legend(bbox_to_anchor=[1., 1.15], loc='center', ncol=numbcols)
else:
axis.legend(bbox_to_anchor=[0.5, 1.15], loc='center', ncol=numbcols)
def supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot=-1, assc=False):
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
# associations with the reference elements
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] > 0:
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
reframpl = gdat.refr.dictelem[q][gdat.refr.nameparagenrelemampl[q]][0, :]
mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl, gdat.refr.nameparagenrelemampl[q])
lgal = np.copy(gdat.refr.dictelem[q]['lgal'][0, :])
bgal = np.copy(gdat.refr.dictelem[q]['bgal'][0, :])
numbelem = int(gdat.refr.numbelem[q])
if gdatmodi is not None and gmod.numbparaelem > 0 and assc:
### hit
indx = gdatmodi.this.indxelemrefrasschits[q][l]
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, label=gdat.refr.lablhits, \
marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
### missed
indx = gdatmodi.this.indxelemrefrasscmiss[q][l]
else:
indx = np.arange(lgal.size)
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.listlablmiss, marker=gdat.refr.listmrkrmiss[q], \
lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
sizexoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
sizeyoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
if 'etag' in gdat.refr.namepara.elem[q]:
for k in range(indx.size):
axis.text(gdat.anglfact * lgal[indx[k]] + sizexoff, gdat.anglfact * bgal[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], \
verticalalignment='center', horizontalalignment='center', \
color='red', fontsize=1)
# temp -- generalize this to input refrlgalhost vs.
if gdat.typedata == 'mock':
## host galaxy position
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
lgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost, gdat.anglfact * bgalhost, facecolor='none', alpha=0.7, \
label='%s Host %d' % (gdat.refr.labl, e), s=300, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.boollens:
## host galaxy Einstein radius
for e in gmod.indxsersfgrd:
truelgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
truebgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
truebeinhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * truelgalhost, \
gdat.anglfact * truebgalhost), \
gdat.anglfact * truebeinhost, \
edgecolor=gdat.refr.colr, facecolor='none', lw=gdat.mrkrlinewdth))
if gmod.boollens:
## source galaxy position
axis.scatter(gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.lgalsour], \
gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.bgalsour], \
facecolor='none', \
alpha=0.7, \
#alpha=gdat.alphelem, \
label='%s Source' % gdat.refr.labl, s=300, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
# model catalog
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
if gdatmodi is not None:
if gmod.numbparaelem > 0:
colr = retr_colr(gdat, strgstat, strgmodl, l)
mrkrsize = retr_mrkrsize(gdat, strgmodl, gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[gmod.nameparagenrelemampl[l]][l]], gmod.nameparagenrelemampl[l])
if 'lgal' in gdatmodi.this.indxparagenrfullelem:
lgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['lgal']]
bgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['bgal']]
else:
gang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['gang']]
aang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['aang']]
lgal, bgal = retr_lgalbgal(gang, aang)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, alpha=gdat.alphelem, label='Sample', marker=gmod.listelemmrkr[l], \
lw=gdat.mrkrlinewdth, color=colr)
## source
if gmod.boollens:
lgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.bgalsour]
axis.scatter(gdat.anglfact * lgalsour, gdat.anglfact * bgalsour, facecolor='none', \
alpha=gdat.alphelem, \
label='%s Source' % gmod.lablpara, s=300, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
## host
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e], facecolor='none', \
alpha=gdat.alphelem, \
label='%s Host' % gmod.lablpara, s=300, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.boollens:
beinhost = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e]), \
gdat.anglfact * beinhost, edgecolor=gmod.colr, facecolor='none', \
lw=gdat.mrkrlinewdth, ls='--'))
# temp
if strgstat == 'pdfn' and gdat.boolcondcatl and gmod.numbparaelem > 0:
lgal = np.zeros(gdat.numbprvlhigh)
bgal = np.zeros(gdat.numbprvlhigh)
ampl = np.zeros(gdat.numbprvlhigh)
cntr = 0
for r in gdat.indxstkscond:
if r in gdat.indxprvlhigh:
lgal[cntr] = gdat.dictglob['poststkscond'][r]['lgal'][0]
bgal[cntr] = gdat.dictglob['poststkscond'][r]['bgal'][0]
# temp -- this does not allow sources with different spectra to be assigned to the same stacked sample
ampl[cntr] = gdat.dictglob['poststkscond'][r][gmod.nameparagenrelemampl[l]][0]
cntr += 1
mrkrsize = retr_mrkrsize(gdat, strgmodl, ampl, gmod.nameparagenrelemampl[l])
colr = retr_colr(gdat, strgstat, strgmodl, l)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
label='Condensed', marker=gmod.listelemmrkr[l], color='black', lw=gdat.mrkrlinewdth)
for r in gdat.indxstkscond:
lgal = np.array([gdat.dictglob['liststkscond'][r]['lgal']])
bgal = np.array([gdat.dictglob['liststkscond'][r]['bgal']])
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
marker=gmod.listelemmrkr[l], color='black', alpha=0.1, lw=gdat.mrkrlinewdth)
def retr_colr(gdat, strgstat, strgmodl, indxpopl=None):
gmod = getattr(gdat, strgmodl)
if strgmodl == 'true':
if indxpopl is None:
colr = gdat.refr.colr
else:
colr = gdat.refr.colrelem[indxpopl]
if strgmodl == 'fitt':
if strgstat == 'this' or strgstat == 'pdfn':
if indxpopl is None:
colr = gmod.colr
else:
colr = gmod.colrelem[indxpopl]
if strgstat == 'mlik':
colr = 'r'
return colr
def retr_levipost(listllik):
minmlistllik = np.amin(listllik)
levipost = np.log(np.mean(1. / np.exp(listllik - minmlistllik))) + minmlistllik
return levipost
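# A minimal check (not part of PCAT) for retr_levipost above: for a constant log-likelihood
# the harmonic-mean estimator returns that constant.
def exam_levipost():

    listllik = np.full(1000, -5.)

    return retr_levipost(listllik) # -> -5.0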
def retr_infofromlevi(pmeallik, levi):
info = pmeallik - levi
return info
def retr_jcbn():
fluxpare, lgalpare, bgalpare, fluxauxi, lgalauxi, bgalauxi = sympy.symbols('fluxpare lgalpare bgalpare fluxauxi lgalauxi bgalauxi')
matr = sympy.Matrix([[ fluxpare, fluxauxi, 0, 0, 0, 0], \
[-fluxpare, 1 - fluxauxi, 0, 0, 0, 0], \
[-lgalauxi, 0, 1, 1 - fluxauxi, 0, 0], \
[-lgalauxi, 0, 1, -fluxauxi, 0, 0], \
[-bgalauxi, 0, 0, 0, 1, 1 - fluxauxi], \
[-bgalauxi, 0, 0, 0, 1, -fluxauxi]])
jcbn = matr.det()
return jcbn
# f1 = uf f0
# f2 = (1 - uf) f0
# x1 = x0 + (1 - uf) ux
# x2 = x0 - uf ux
# y1 = y0 + (1 - uf) uy
# y2 = y0 - uf uy
# f1/uf f1/f0 f1/x0 f1/ux f1/y0 f1/uy
# f2/uf f2/f0 f2/x0 f2/ux f2/y0 f2/uy
# x1/uf x1/f0 x1/x0 x1/ux x1/y0 x1/uy
# x2/uf x2/f0 x2/x0 x2/ux x2/y0 x2/uy
# y1/uf y1/f0 y1/x0 y1/ux y1/y0 y1/uy
# y2/uf y2/f0 y2/x0 y2/ux y2/y0 y2/uy
# f0 uf 0 0 0 0
# -f0 1 - uf 0 0 0 0
# -ux 0 1 1 - uf 0 0
# -ux 0 1 -uf 0 0
# -uy 0 0 0 1 1 - uf
# -uy 0 0 0 1 -uf
# f0
#retr_jcbn()
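# A minimal check (not part of PCAT) that the split/merge Jacobian above reduces to the
# parent flux, consistent with the '# f0' note at the end of the derivation. It assumes
# sympy is importable, as already required by retr_jcbn().
def chck_jcbn():

    fluxpare = sympy.symbols('fluxpare')

    return sympy.simplify(retr_jcbn() - fluxpare) == 0 # -> True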
def retr_angldist(gdat, lgalfrst, bgalfrst, lgalseco, bgalseco):
# temp -- heal does not work when the dimension of lgalfrst is 1
if gdat.typepixl == 'heal':
dir1 = np.array([lgalfrst, bgalfrst])
dir2 = np.array([lgalseco, bgalseco])
angldist = hp.rotator.angdist(dir1, dir2)
else:
angldist = np.sqrt((lgalfrst - lgalseco)**2 + (bgalfrst - bgalseco)**2)
return angldist
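# A minimal usage sketch (not part of PCAT) of the flat-sky branch of retr_angldist above;
# the gdat container is mocked with the single attribute the function needs.
def exam_angldist():

    from types import SimpleNamespace

    gdatexam = SimpleNamespace(typepixl='cart')

    return retr_angldist(gdatexam, 0., 0., 3., 4.) # -> 5.0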
def retr_deflextr(gdat, indxpixlelem, sher, sang):
factcosi = sher * np.cos(2. * sang)
factsine = sher * np.sin(2. * sang)
defllgal = factcosi * gdat.lgalgrid[indxpixlelem] + factsine * gdat.bgalgrid[indxpixlelem]
deflbgal = factsine * gdat.lgalgrid[indxpixlelem] - factcosi * gdat.bgalgrid[indxpixlelem]
return np.vstack((defllgal, deflbgal)).T
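# A minimal usage sketch (not part of PCAT) of the external-shear deflection above, on a
# mock two-pixel grid. For sang = 0 the deflection reduces to (+sher * lgal, -sher * bgal).
def exam_deflextr():

    from types import SimpleNamespace

    gdatexam = SimpleNamespace(lgalgrid=np.array([1., 2.]), bgalgrid=np.array([0., 1.]))

    return retr_deflextr(gdatexam, np.array([0, 1]), 0.1, 0.)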
def readfile(path):
print('Reading %s...' % path)
filepick = open(path + '.p', 'rb')
filearry = h5py.File(path + '.h5', 'r')
gdattemptemp = pickle.load(filepick)
for attr in filearry:
setattr(gdattemptemp, attr, filearry[attr][()])
filepick.close()
filearry.close()
if 'gdatfinl' in path or 'gdatinit' in path:
if hasattr(gdattemptemp, 'edis') and gdattemptemp.edis is not None and hasattr(gdattemptemp, 'binsener'):
gdattemptemp.edisintp = sp.interpolate.interp1d(gdattemptemp.binsener, gdattemptemp.edis, fill_value='extrapolate')
gdattemptemp.adisobjt = sp.interpolate.interp1d(gdattemptemp.redsintp, gdattemptemp.adisintp, fill_value='extrapolate')
gdattemptemp.redsfromdlosobjt = sp.interpolate.interp1d(gdattemptemp.adisintp * gdattemptemp.redsintp, \
gdattemptemp.redsintp, fill_value='extrapolate')
return gdattemptemp
def init_stat(gdat):
# construct the initial state
if gdat.typeverb > 0:
print('Initializing the sampler state...')
print('inittype')
print(gdat.inittype)
gmod = gdat.fitt
## initialization
### initialize the unit sample vector randomly
gmod.this.paragenrunitfull = np.random.rand(gmod.numbparagenrfull)
gmod.this.paragenrscalfull = np.empty(gmod.numbparagenrfull)
## impose user-specified initial state
### number of elements
## create dummy indxparagenrfullelem
gmod.this.indxparagenrfullelem = None
if gmod.numbparaelem > 0:
if gdat.inittype == 'refr':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gmod.paragenrunitfull[gmod.indxpara.numbelem[l]]
else:
for l in gmod.indxpopl:
if gmod.typemodltran == 'pois':
meanelemtemp = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, \
gmod.this.indxparagenrfullelem)[gmod.indxpara.meanelem[l]]
print('temp -- user input is not working for numbelem')
#namevarb = 'numbelempop%d' % l
#initvalu = getattr(gmod.init, namevarb)
#if initvalu > gmod.maxmpara.numbelem[l] or initvalu < gmod.minmpara.numbelem[l]:
# raise Exception('Bad initial number of elements...')
#gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = initvalu
if gmod.typemodltran == 'pois':
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = np.random.poisson(meanelemtemp)
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = round(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
min(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.maxmpara.numbelem[l])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
max(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.minmpara.numbelem[l])
gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]] = gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]]
if gdat.booldiagmode:
if gdat.typedata == 'mock' and gdat.inittype == 'refr':
for l in gmod.indxpopl:
if gmod.paragenrunitfull[gmod.indxpara.numbelem[l]] > gmod.maxmpara.numbelem[l]:
raise Exception('')
if gmod.numbparaelem > 0:
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.inittype == 'reco':
if gdat.namerecostat is not None:
strgcnfg = gdat.namerecostat
else:
strgcnfg = gdat.strgcnfg
path = gdat.pathoutp + 'stat_' + strgcnfg + '.h5'
if os.path.exists(path):
boolinitreco = True
thisfile = h5py.File(path, 'r')
if gdat.typeverb > 0:
print('Initializing from the state %s...' % path)
print('Likelihood:')
print(thisfile['lliktotl'][...])
# find the number of populations provided
maxmindxpopl = 0
for l in range(10):
for attr in thisfile:
if attr.startswith('lgalpop'):
indxpopltemp = int(attr[7])
if indxpopltemp > maxmindxpopl:
maxmindxpopl = indxpopltemp
numbpoplinpt = maxmindxpopl + 1
if numbpoplinpt != gmod.numbpopl:
print('State file and fitting metamodel have different number of populations.')
# find the number of elements provided
cntr = np.zeros(numbpoplinpt, dtype=int)
for attr in thisfile:
if attr.startswith('lgalpop'):
indxpopltemp = int(attr[7])
cntr[indxpopltemp] += 1
if gdat.typeverb > 0:
print('Number of elements found:')
print(cntr)
for attr in thisfile:
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if nameparagenrbase == attr:
if nameparagenrbase.startswith('numbelem'):
try:
indxpopltemp = int(nameparagenrbase[-1])
initnumbelem = getattr(gdat, 'initnumbelempop%d' % indxpopltemp)
print('Initial condition for the number of elements conflicts with the state file. Defaulting to the argument...')
except:
initnumbelem = thisfile[attr][()]
gmod.this.paragenrunitfull[k] = initnumbelem
else:
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', thisfile[attr][()], k)
if gmod.this.paragenrunitfull[k] == 0.:
print('Warning CDF is zero.')
if not np.isfinite(thisfile[attr][()]):
raise Exception('Retrieved state parameter is not finite.')
if (gmod.numbparaelem == 0 or gmod.numbparaelem > 0 and not k in gmod.indxpara.numbelem) and \
(not np.isfinite(gmod.this.paragenrunitfull[k]) or gmod.this.paragenrunitfull[k] < 0. or \
gmod.this.paragenrunitfull[k] > 1.):
raise Exception('CDF of the retrieved state parameter is bad.')
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
maxmnumbelem = gmod.maxmpara.numbelem[l]
if gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] > maxmnumbelem:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = maxmnumbelem
if gdat.typeverb > 0:
print('Tapering off the element list...')
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
if gdat.typeverb > 0:
print('gmod.this.paragenrunitfull[gmod.indxpara.numbelem]')
print(gmod.this.paragenrunitfull[gmod.indxpara.numbelem])
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if (gmod.this.paragenrunitfull == 0).all():
raise Exception('Bad initialization.')
if gmod.numbparaelem > 0 and gmod.this.indxparagenrfullelem is not None:
for nameparagenrelem in gmod.namepara.elem:
initcomp = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
initcomp[l] = np.empty(len(gmod.this.indxelemfull[l]))
for k in range(len(gmod.this.indxelemfull[l])):
namefiel = '%spop%d%04d' % (nameparagenrelem, l, k)
for attr in thisfile:
if namefiel == attr:
initcomp[l][k] = thisfile[namefiel][()]
setattr(gdat, 'init' + nameparagenrelem, initcomp)
initcompfromstat(gdat, gdatmodi, 'init')
thisfile.close()
else:
boolinitreco = False
if gdat.typeverb > 0:
print('Could not find the state file, %s, to initialize the sampler.' % path)
if gdat.inittype == 'refr':
if gdat.typedata == 'inpt':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gdat.refr.numbelem[l]
if gdat.typedata == 'mock':
# initialize the fitting-model base parameters from the matching true-model parameters
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if not (gdat.inittype == 'pert' and nameparagenrbase.startswith('numbelem')) and \
nameparagenrbase in gdat.true.nameparagenrbase:
indxparatrue = np.where(gdat.true.nameparagenrbase == nameparagenrbase)[0]
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.true.this.paragenrscalfull[indxparatrue], k)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.typeverb > 1:
show_paragenrscalfull(gdat, gdatmodi)
if gmod.this.indxparagenrfullelem is not None:
print('Initializing elements from the reference element parameters...')
show_paragenrscalfull(gdat, gdatmodi)
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
show_paragenrscalfull(gdat, gdatmodi)
initcompfromstat(gdat, gdatmodi, 'refr')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
## impose user-specified individual initial values
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if nameparagenrbase.startswith('numbelem'):
continue
if gdat.inittype == 'reco' or gdat.inittype == 'refr' or gdat.inittype == 'pert':
try:
getattr(gdat, 'init' + nameparagenrbase)
print('Conflicting initial state arguments detected, init keyword takes precedence.')
except:
pass
try:
initvalu = getattr(gdat, 'init' + nameparagenrbase)
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', initvalu, k)
if gdat.typeverb > 0:
print('Received initial condition for %s: %.3g' % (nameparagenrbase, initvalu))
except:
pass
## PSF
if gdat.initpsfp is not None:
print('Initializing the metamodel PSF from the provided initial state...')
if gdat.initpsfp.size != gmod.indxpara.psfp.size:
raise Exception('')
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if k in gmod.indxpara.psfp:
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.initpsfp[k-gmod.indxpara.psfp[0]], k)
if gdat.initpsfprefr:
print('Initializing the metamodel PSF from the reference state...')
for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
if k in gmod.indxpara.psfp:
gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmod.psfpexpr[k-gmod.indxpara.psfp[0]], k)
if gdat.inittype == 'rand' or gdat.inittype == 'reco' and not boolinitreco:
if gdat.typeverb > 0:
print('Initializing from a random state...')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
# check the initial unit sample vector for bad entries
if gmod.numbparaelem > 0:
indxsampdiff = np.setdiff1d(gmod.indxparagenrfull, gmod.indxpara.numbelem)
if np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])).any():
raise Exception('')
indxsampbaddlowr = np.where((gmod.this.paragenrunitfull[indxsampdiff] <= 0.) | np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])))[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull[indxsampdiff] >= 1.)[0]
indxsampbaddlowr = indxsampdiff[indxsampbaddlowr]
indxsampbadduppr = indxsampdiff[indxsampbadduppr]
else:
indxsampbaddlowr = np.where(gmod.this.paragenrunitfull <= 0.)[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull >= 1.)[0]
indxsampbadd = np.concatenate((indxsampbaddlowr, indxsampbadduppr))
if indxsampbadd.size > 0:
print('Initial value caused unit sample vector to go outside the unit interval...')
show_paragenrscalfull(gdat, gdatmodi, indxsampshow=indxsampbadd)
gmod.this.paragenrunitfull[indxsampbadd] = np.random.rand(indxsampbadd.size)
raise Exception('')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
indxbadd = np.where(np.logical_not(np.isfinite(gmod.this.paragenrscalfull)))[0]
if indxbadd.size > 0:
raise Exception('')
def writfile(gdattemp, path):
filepick = open(path + '.p', 'wb')
filearry = h5py.File(path + '.h5', 'w')
gdattemptemp = tdpy.gdatstrt()
for attr, valu in gdattemp.__dict__.items():
if attr.endswith('psfnintp'):
continue
if isinstance(valu, np.ndarray) and valu.dtype != np.dtype('O') and valu.dtype != np.dtype('<U4'):# or isinstance(valu, str) or \
#isinstance(valu, float) or isinstance(valu, bool) or isinstance(valu, int) or isinstance(valu, np.float):
filearry.create_dataset(attr, data=valu)
else:
# temp -- make sure interpolation objects are not written.
if attr != 'adisobjt' and attr != 'redsfromdlosobjt' and attr != 'edisintp':
setattr(gdattemptemp, attr, valu)
print('Writing to %s...' % path)
pickle.dump(gdattemptemp, filepick, protocol=pickle.HIGHEST_PROTOCOL)
filepick.close()
filearry.close()
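# A minimal round-trip sketch (not part of PCAT's pipeline) for writfile/readfile above;
# it assumes pickle, h5py, and tdpy are importable at the module level, as those functions
# already require. The path and attribute names are hypothetical.
def exam_readwritfile(path='/tmp/gdatexam'):

    gdatexam = tdpy.gdatstrt()
    gdatexam.arryexam = np.arange(5.)
    gdatexam.strgexam = 'exam'

    # numpy arrays go into the HDF5 file, everything else into the pickle
    writfile(gdatexam, path)
    gdatread = readfile(path)

    return gdatread.arryexam, gdatread.strgexam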
def retr_deflcutf(angl, defs, asca, acut, asym=False):
fracanglasca = angl / asca
deflcutf = defs / fracanglasca
# second term in the NFW deflection profile
fact = np.ones_like(fracanglasca)
indxlowr = np.where(fracanglasca < 1.)[0]
indxuppr = np.where(fracanglasca > 1.)[0]
fact[indxlowr] = np.arccosh(1. / fracanglasca[indxlowr]) / np.sqrt(1. - fracanglasca[indxlowr]**2)
fact[indxuppr] = np.arccos(1. / fracanglasca[indxuppr]) / np.sqrt(fracanglasca[indxuppr]**2 - 1.)
if asym:
deflcutf *= np.log(fracanglasca / 2.) + fact
else:
fracacutasca = acut / asca
factcutf = fracacutasca**2 / (fracacutasca**2 + 1)**2 * ((fracacutasca**2 + 1. + 2. * (fracanglasca**2 - 1.)) * fact + \
np.pi * fracacutasca + (fracacutasca**2 - 1.) * np.log(fracacutasca) + np.sqrt(fracanglasca**2 + fracacutasca**2) * (-np.pi + (fracacutasca**2 - 1.) / fracacutasca * \
np.log(fracanglasca / (np.sqrt(fracanglasca**2 + fracacutasca**2) + fracacutasca))))
deflcutf *= factcutf
return deflcutf
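# A minimal usage sketch (not part of PCAT) for retr_deflcutf above: evaluate the truncated
# (cutoff) NFW deflection at a few radii given in the same angular units as asca and acut.
def exam_deflcutf():

    # radii chosen away from the angl = asca limit, where the piecewise factor switches branch
    angl = np.array([0.5, 1.5, 3.])

    return retr_deflcutf(angl, defs=1., asca=1., acut=2.)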
def initchro(gdat, gdatmodi, name):
if gdatmodi is not None:
setattr(gdatmodi.this, 'chro' + name, gdat.functime())
def stopchro(gdat, gdatmodi, name):
if gdatmodi is not None:
setattr(gdatmodi.this, 'chro' + name, gdat.functime() - getattr(gdatmodi.this, 'chro' + name))
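# A minimal usage sketch (not part of PCAT's sampler) of the chronometer helpers above;
# gdat.functime and the gdatmodi.this container are mocked with SimpleNamespace objects.
def exam_chro():

    import time
    from types import SimpleNamespace

    gdatexam = SimpleNamespace(functime=time.time)
    gdatmodiexam = SimpleNamespace(this=SimpleNamespace())

    initchro(gdatexam, gdatmodiexam, 'exam')
    time.sleep(0.01)
    stopchro(gdatexam, gdatmodiexam, 'exam')

    return gdatmodiexam.this.chroexam # elapsed wall-clock time in seconds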
def retr_defl(gdat, indxpixlelem, lgal, bgal, angllens, ellp=None, angl=None, rcor=None, asca=None, acut=None):
# translate the grid
lgaltran = gdat.lgalgrid[indxpixlelem] - lgal
bgaltran = gdat.bgalgrid[indxpixlelem] - bgal
if acut is not None:
defs = angllens
angl = np.sqrt(lgaltran**2 + bgaltran**2)
defl = retr_deflcutf(angl, defs, asca, acut)
defllgal = lgaltran / angl * defl
deflbgal = bgaltran / angl * defl
else:
bein = angllens
# rotate the grid
lgalrttr = np.cos(angl) * lgaltran - np.sin(angl) * bgaltran
bgalrttr = np.sin(angl) * lgaltran + np.cos(angl) * bgaltran
axisrati = 1. - ellp
facteccc = np.sqrt(1. - axisrati**2)
factrcor = np.sqrt(axisrati**2 * lgalrttr**2 + bgalrttr**2)
defllgalrttr = bein * axisrati / facteccc * np.arctan(facteccc * lgalrttr / factrcor)
deflbgalrttr = bein * axisrati / facteccc * np.arctanh(facteccc * bgalrttr / factrcor)
# rotate the deflection vector back to the original basis
defllgal = np.cos(angl) * defllgalrttr + np.sin(angl) * deflbgalrttr
deflbgal = -np.sin(angl) * defllgalrttr + np.cos(angl) * deflbgalrttr
defl = np.vstack((defllgal, deflbgal)).T
return defl
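# A minimal usage sketch (not part of PCAT) of the smooth-deflector branch of retr_defl
# above (acut is None), evaluated on a mock two-pixel grid offset from the deflector center.
def exam_defl():

    from types import SimpleNamespace

    gdatexam = SimpleNamespace(lgalgrid=np.array([0.5, -0.3]), bgalgrid=np.array([0.2, 0.4]))

    # returns an (numbpixl, 2) array of deflections in the units of the input grid
    return retr_defl(gdatexam, np.array([0, 1]), 0., 0., 1., ellp=0.2, angl=0.3)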
def retr_lpriselfdist(gdat, strgmodl, feat, strgfeat):
gmod = getattr(gdat, strgmodl)
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
lpri = np.sum(np.log(pdfn_self(feat, minm, maxm)))
return lpri
def retr_lprilogtdist(gdat, strgmodl, feat, strgfeat):
gmod = getattr(gdat, strgmodl)
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
lpri = np.sum(np.log(pdfn_logt(feat, minm, maxm)))
return lpri
def retr_lpripowrdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
gmod = getattr(gdat, strgmodl)
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + strgfeat + 'pop%d' % l)]
lpri = np.sum(np.log(pdfn_powr(feat, minm, maxm, slop)))
return lpri
def retr_lpridpowdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
gmod = getattr(gdat, strgmodl)
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
brek = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distbrek')[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + strgfeat)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + strgfeat)[l]]
lpri = np.sum(np.log(pdfn_dpow(feat, minm, maxm, brek, sloplowr, slopuppr)))
return lpri
def retr_lprigausdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
gmod = getattr(gdat, strgmodl)
distmean = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'diststdv')[l]]
lpri = np.sum(np.log(pdfn_gaus(feat, distmean, diststdv)))
return lpri
def retr_lpriigamdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
gmod = getattr(gdat, strgmodl)
slop = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'slop')[l]]
cutf = getattr(gmod, 'cutf' + strgfeat)
lpri = np.sum(np.log(pdfn_igam(feat, slop, cutf)))
return lpri
def traptdim(gdat, arry):
s1 = arry[0, 0] + arry[-1, 0] + arry[0, -1] + arry[-1, -1]
s2 = np.sum(arry[1:-1, 0]) + np.sum(arry[1:-1, -1]) + np.sum(arry[0, 1:-1]) + np.sum(arry[-1, 1:-1])
s3 = np.sum(arry[1:-1, 1:-1])
summ = (s1 + 2*s2 + 4*s3) * gdat.apix
return summ
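# A minimal check (not part of PCAT) for traptdim above: with unit pixel area, a 3x3 array
# of ones gives corner, edge, and interior sums of 4, 4, and 1, so the weighted total is
# (4 + 2 * 4 + 4 * 1) * apix = 16.
def exam_traptdim():

    from types import SimpleNamespace

    gdatexam = SimpleNamespace(apix=1.)

    return traptdim(gdatexam, np.ones((3, 3))) # -> 16.0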
def retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons=None):
pdfnspatprio = pdfnspatpriotemp
if spatdistcons is not None:
pdfnspatprio += spatdistcons
summ = traptdim(gdat, pdfnspatprio)
pdfnspatprio /= summ
lpdfspatprio = np.log(pdfnspatprio)
lpdfspatprioobjt = sp.interpolate.RectBivariateSpline(gdat.binspara.bgalcart, gdat.binspara.lgalcart, lpdfspatprio)
return lpdfspatprio, lpdfspatprioobjt
def retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=False):
if strgmodl == 'true':
gdatobjt = gdat.true
elif strgmodl == 'fitt' and boolinit:
gdatobjt = gdat.fitt
else:
gdatobjt = gdatmodi
return gdatobjt
def proc_samp(gdat, gdatmodi, strgstat, strgmodl, fast=False, boolinit=False):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=boolinit)
gmodstat = getattr(gdatobjt, strgstat)
initchro(gdat, gdatmodi, 'pars')
# grab the sample vector
indxpara = np.arange(gmodstat.paragenrscalfull.size)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.paragenrscalfull).all():
raise Exception('')
if gmod.typeevalpsfn != 'none' and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
psfp = gmodstat.paragenrscalfull[gmod.indxpara.psfp]
if gdat.booldiagmode:
if np.where(psfp == 0)[0].size == psfp.size:
raise Exception('')
setattr(gmodstat, 'psfp', psfp)
bacp = gmodstat.paragenrscalfull[gmod.indxpara.bacp]
if gmod.numbparaelem > 0:
# temp -- this may slow down execution
gmodstat.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodstat.indxelemfull, strgmodl)
gmodstat.numbelem = np.empty(gmod.numbpopl, dtype=int)
indxelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
indxelem[l] = np.arange(gmodstat.numbelem[l])
gmodstat.numbelem[l] = np.sum(gmodstat.numbelem[l])
gmodstat.numbelemtotl = np.sum(gmodstat.numbelem)
gmodstat.dictelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.dictelem[l] = dict()
for strgfeat in gmod.namepara.genrelemdefa:
gmodstat.dictelem[l][strgfeat] = []
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem] = gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]
if gdat.booldiagmode:
if ((abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) < 1e-100 ) & (abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) > 0.)).any():
raise Exception('')
if gmodstat.numbelem[l] != len(gmodstat.dictelem[l][nameparagenrelem]):
print('l')
print(l)
print('numbelem')
print(gmodstat.numbelem[l])
print('gmodstat.dictelem')
print(gmodstat.dictelem)
print('nameparagenrelem')
print(nameparagenrelem)
raise Exception('')
if gdat.boolbinsener:
if gdat.typeverb > 2:
print('Calculating element spectra...')
initchro(gdat, gdatmodi, 'spec')
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
if gmod.typeelem[l].startswith('lghtline'):
if gmod.typeelem[l] == 'lghtlinevoig':
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
else:
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], \
edisintp=gdat.edisintp, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'spec')
if gdat.typeverb > 2:
print('Element features:')
for l in gmod.indxpopl:
print('l')
print(l)
for strgfeat in gmod.namepara.genrelem[l]:
print(strgfeat)
print(gmodstat.dictelem[l][strgfeat])
if gdat.booldiagmode:
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if (gmod.listscalparagenrelem[l][g] != 'gaus' and not gmod.listscalparagenrelem[l][g].startswith('lnor')) and \
(gmod.listscalparagenrelem[l][g] != 'expo' and (gmodstat.dictelem[l][nameparagenrelem] < getattr(gmod.minmpara, nameparagenrelem)).any()) or \
(gmodstat.dictelem[l][nameparagenrelem] > getattr(gmod.maxmpara, nameparagenrelem)).any():
print('l, g')
print(l, g)
print('nameparagenrelem')
print(nameparagenrelem)
print('gmodstat.dictelem[l][nameparagenrelem]')
summgene(gmodstat.dictelem[l][nameparagenrelem])
print('getattr(gmod, minm + nameparagenrelem)')
print(getattr(gmod.minmpara, nameparagenrelem))
print('getattr(gmod, maxm + nameparagenrelem)')
print(getattr(gmod.maxmpara, nameparagenrelem))
print('gmod.listscalparagenrelem[l][g]')
print(gmod.listscalparagenrelem[l][g])
raise Exception('')
# calculate element spectra
# temp
if gdat.booldiagmode:
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['acut']] < 0.)[0]
if indx.size > 0:
raise Exception('')
if gdat.variacut:
indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['asca']] < 0.)[0]
if indx.size > 0:
raise Exception('')
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
# evaluate horizontal and vertical position for elements whose position is a power law in image-centric radius
if gmod.typespatdist[l] == 'glc3':
gmodstat.dictelem[l]['dlos'], gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'] = retr_glc3(gmodstat.dictelem[l]['dglc'], \
gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'])
if gmod.typespatdist[l] == 'gangexpo':
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], = retr_lgalbgal(gmodstat.dictelem[l]['gang'], \
gmodstat.dictelem[l]['aang'])
if gdat.booldiagmode:
if gmodstat.numbelem[l] > 0:
if np.amin(gmodstat.dictelem[l]['lgal']) < gmod.minmlgal or \
np.amax(gmodstat.dictelem[l]['lgal']) > gmod.maxmlgal or \
np.amin(gmodstat.dictelem[l]['bgal']) < gmod.minmbgal or \
np.amax(gmodstat.dictelem[l]['bgal']) > gmod.maxmbgal:
raise Exception('Bad coordinates!')
if gmod.typespatdist[l] == 'los3':
gmodstat.dictelem[l]['dglc'], gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'] = retr_los3(gmodstat.dictelem[l]['dlos'], \
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
# evaluate flux for pulsars
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['lumi'] = retr_lumipuls(gmodstat.dictelem[l]['geff'], gmodstat.dictelem[l]['magf'], gmodstat.dictelem[l]['per0'])
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
gmodstat.dictelem[l]['lumi'] = gmodstat.dictelem[l]['lum0'] * (1. + gmodstat.dictelem[l]['reds'])**4
if gmod.typeelem[l] == 'lghtpntspuls' or gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['flux'] = retr_flux(gdat, gmodstat.dictelem[l]['lumi'], gmodstat.dictelem[l]['dlos'])
# evaluate spectra
if gmod.typeelem[l].startswith('lghtline'):
if gmod.typeelem[l] == 'lghtlinevoig':
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
else:
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], edisintp=gdat.edisintp, spectype=gmod.spectype[l])
else:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'pars')
### loglikelihood
initchro(gdat, gdatmodi, 'modl')
if gmod.boollens:
lgalsour = gmodstat.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gmodstat.paragenrscalfull[gmod.indxpara.bgalsour]
if gdat.typeverb > 2:
print('Evaluating the likelihood...')
# process a sample vector and the occupancy list to calculate secondary variables
if gmod.boollens:
fluxsour = gmodstat.paragenrscalfull[gmod.indxpara.fluxsour]
if gdat.numbener > 1:
sindsour = gmodstat.paragenrscalfull[gmod.indxpara.sindsour]
sizesour = gmodstat.paragenrscalfull[gmod.indxpara.sizesour]
ellpsour = gmodstat.paragenrscalfull[gmod.indxpara.ellpsour]
anglsour = gmodstat.paragenrscalfull[gmod.indxpara.anglsour]
if gmod.typeemishost != 'none':
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
fluxhost = [[] for e in gmod.indxsersfgrd]
if gdat.numbener > 1:
sindhost = [[] for e in gmod.indxsersfgrd]
sizehost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % e)]
bgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % e)]
fluxhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'fluxhostisf%d' % e)]
if gdat.numbener > 1:
sindhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sindhostisf%d' % e)]
sizehost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sizehostisf%d' % e)]
if gmod.boollens:
beinhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
beinhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % e)]
if gmod.typeemishost != 'none':
ellphost = [[] for e in gmod.indxsersfgrd]
anglhost = [[] for e in gmod.indxsersfgrd]
serihost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
ellphost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'ellphostisf%d' % e)]
anglhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'anglhostisf%d' % e)]
serihost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'serihostisf%d' % e)]
if gmod.boollens:
numbpixltemp = gdat.numbpixlcart
defl = np.zeros((numbpixltemp, 2))
# determine the indices of the pixels over which element kernels will be evaluated
if gdat.boolbinsspat:
if gmod.numbparaelem > 0:
listindxpixlelem = [[] for l in gmod.indxpopl]
listindxpixlelemconc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
listindxpixlelem[l], listindxpixlelemconc[l] = retr_indxpixlelemconc(gdat, strgmodl, gmodstat.dictelem, l)
if gmod.boollens:
sherextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sherextr')]
sangextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sangextr')]
## host halo deflection
initchro(gdat, gdatmodi, 'deflhost')
deflhost = [[] for e in gmod.indxsersfgrd]
indxpixlmiss = gdat.indxpixlcart
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the deflection field due to host galaxy %d' % e)
print('lgalhost[e]')
print(lgalhost[e])
print('bgalhost[e]')
print(bgalhost[e])
print('beinhost[e]')
print(beinhost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
deflhost[e] = retr_defl(gdat, indxpixlmiss, lgalhost[e], bgalhost[e], beinhost[e], ellp=ellphost[e], angl=anglhost[e])
if gdat.booldiagmode:
indxpixltemp = slice(None)
setattr(gmodstat, 'deflhostisf%d' % e, deflhost[e])
if gdat.typeverb > 2:
print('deflhost[e]')
summgene(deflhost[e])
defl += deflhost[e]
if gdat.typeverb > 2:
print('After adding the host deflection...')
print('defl')
summgene(defl)
if gdat.booldiagmode:
if not np.isfinite(deflhost).all():
raise Exception('')
stopchro(gdat, gdatmodi, 'deflhost')
## external shear
initchro(gdat, gdatmodi, 'deflextr')
deflextr = []
indxpixltemp = gdat.indxpixlcart
deflextr = retr_deflextr(gdat, indxpixltemp, sherextr, sangextr)
defl += deflextr
if gdat.typeverb > 2:
print('After adding the external deflection...')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'deflextr')
# Boolean flag to indicate that the object to convolve the image will be needed
boolneedpsfnconv = gdat.typepixl == 'cart' and (gmod.typeevalpsfn == 'conv' or gmod.typeevalpsfn == 'full')
## Boolean flag to indicate that the object to convolve the image will be constructed
boolcalcpsfnconv = strgmodl == 'true' or boolinit or gdat.boolmodipsfn
# get the convolution object
if boolneedpsfnconv and boolcalcpsfnconv:
initchro(gdat, gdatmodi, 'psfnconv')
if gdat.typeverb > 2:
print('Evaluating the PSF convolution kernel...')
psfnconv = [[[] for i in gdat.indxener] for m in gdat.indxevtt]
if gdat.typepixl == 'cart':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
for mm, m in enumerate(gdat.indxevtt):
for ii, i in enumerate(gdat.indxener):
if gmod.typemodlpsfn == 'singgaus':
sigm = psfp[i+m*gdat.numbener]
else:
sigm = fwhm[i, m] / 2.355
gmodstat.psfnconv[mm][ii] = AiryDisk2DKernel(sigm / gdat.sizepixl)
stopchro(gdat, gdatmodi, 'psfnconv')
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and gmod.numbparaelem > 0:
if strgmodl == 'true' or boolinit or gdat.boolmodipsfn:
if gdat.typepixl == 'heal':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gdat.typepixl == 'cart':
if gdat.kernevaltype == 'ulip':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
if gdat.booldiagmode:
if not np.isfinite(gmodstat.psfnintp(0.05)).all():
raise Exception('')
if gdat.kernevaltype == 'bspx':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.anglcart.flatten(), gmod.typemodlpsfn, strgmodl)
# side length of the upsampled kernel
gdat.numbsidekernusam = 100
# side length of the original kernel
gdat.numbsidekern = gdat.numbsidekernusam // factkernusam
gdat.indxsidekern = np.arange(gdat.numbsidekern)
# pad by one row and one column
#psf = np.zeros((gdat.numbsidekernusam+1, gdat.numbsidekernusam+1))
#psf[0:gdat.numbsidekernusam, 0:gdat.numbsidekernusam] = psf0
# make design matrix for each factkernusam x factkernusam region
nx = factkernusam + 1
y, x = np.mgrid[0:nx, 0:nx] / float(factkernusam)
x = x.flatten()
y = y.flatten()
kernmatrdesi = np.array([np.full(nx*nx, 1), x, y, x*x, x*y, y*y, x*x*x, x*x*y, x*y*y, y*y*y]).T
# output np.array of coefficients
gmodstat.psfnintp = np.empty((gdat.numbsidekern, gdat.numbsidekern, kernmatrdesi.shape[1]))
# solve p = kernmatrdesi psfnintp for psfnintp
for iy in gdat.indxsidekern:
for ix in gdat.indxsidekern:
p = psf[iy*factkernusam:(iy+1)*factkernusam+1, ix*factkernusam:(ix+1)*factkernusam+1].flatten()
gmodstat.psfnintp[iy, ix, :] = np.dot(np.linalg.inv(np.dot(kernmatrdesi.T, kernmatrdesi)), np.dot(kernmatrdesi.T, p))
else:
gmodstat.psfnintp = gdat.fitt.this.psfnintp
sbrt = dict()
for name in gmod.listnamediff:
sbrt[name] = []
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = []
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = []
if gmod.boolelemdeflsubhanyy:
deflsubh = []
# retrieve or initialize state variable
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = np.zeros_like(gdat.expo)
if gmod.boolelemdeflsubhanyy:
deflsubh = np.zeros((gdat.numbpixl, 2))
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = np.zeros_like(gdat.expo)
# element kernel evaluation
if gmod.boolelemsbrtdfncanyy:
initchro(gdat, gdatmodi, 'elemsbrtdfnc')
sbrt['dfnc'] = []
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
for k in range(gmodstat.numbelem[l]):
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
if gmod.typeelem[l] == 'clusvari':
sbrtdfnc[0, listindxpixlelem[l][k], 0] += gmodstat.dictelem[l]['nobj'][k] / 2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[listindxpixlelem[l][k]])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[listindxpixlelem[l][k]])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
if gmod.boolelempsfn[l]:
print('sbrtdfnc')
summgene(sbrtdfnc)
sbrtdfnc[:, listindxpixlelem[l][k], :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
if gmod.typeelem[l].startswith('lghtline'):
sbrtdfnc[:, 0, 0] += gmodstat.dictelem[l]['spec'][:, k]
sbrt['dfnc'] = sbrtdfnc
if gdat.booldiagmode:
if not np.isfinite(sbrtdfnc).all():
raise Exception('Element delta function brightness not finite.')
setattr(gmodstat, 'sbrtdfnc', sbrt['dfnc'])
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['dfnc'])
numbelemtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
numbelemtemp += np.sum(gmodstat.numbelem[l])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtdfnc')
if gmod.boolelemdeflsubhanyy:
initchro(gdat, gdatmodi, 'elemdeflsubh')
if gdat.typeverb > 2:
print('Perturbing subhalo deflection field')
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
for kk, k in enumerate(indxelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
if gmod.typeelemspateval[l] == 'locl':
indxpixl = listindxpixlelem[l][kk]
else:
indxpixl = gdat.indxpixl
deflsubh[indxpixl, :] += retr_defl(gdat, indxpixl, \
gmodstat.dictelem[l]['lgal'][kk], gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['defs'][kk], \
asca=asca, acut=acut)
# temp -- find out what is causing the features in the element convergence maps
#for kk, k in enumerate(indxelem[l]):
# indxpixlpnts = retr_indxpixl(gdat, gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['lgal'][kk])
# if deflsubh[listindxpixlelem[l][kk], :]
if gdat.typeverb > 2:
print('deflsubh')
summgene(deflsubh)
setattr(gmodstat, 'deflsubh', deflsubh)
if gdat.booldiagmode:
if not np.isfinite(deflsubh).all():
raise Exception('Element deflection is not finite.')
defl += deflsubh
if gdat.typeverb > 2:
print('After adding subhalo deflection to the total deflection')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'elemdeflsubh')
if gmod.boolelemsbrtextsbgrdanyy:
initchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if strgstat == 'this':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtgausbgrd':
for k in range(gmodstat.numbelem[l]):
sbrtextsbgrd[:, listindxpixlelem[l][k], :] += gmodstat.dictelem[l]['spec'][:, k, None, None] / \
2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[None, listindxpixlelem[l][k], None])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[None, listindxpixlelem[l][k], None])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
setattr(gmodstat, 'sbrtextsbgrd', sbrtextsbgrd)
sbrt['extsbgrd'] = []
sbrt['extsbgrd'] = sbrtextsbgrd
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['extsbgrd'])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if gdat.typeverb > 2:
print('Element related state variables after perturbations...')
if gmod.boolelemsbrtdfncanyy:
print('sbrtdfnc')
summgene(sbrtdfnc)
if gmod.boolelemdeflsubhanyy:
print('deflsubh')
summgene(deflsubh)
if gmod.boolelemsbrtextsbgrdanyy:
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
if gmod.boollens:
# lensed surface brightness
initchro(gdat, gdatmodi, 'sbrtlens')
if gdat.typeverb > 2:
print('Evaluating lensed surface brightness...')
if strgstat == 'this' or gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
sbrt['bgrd'] = []
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
sbrt['bgrdgalx'] = []
if gdat.numbener > 1:
specsour = retr_spec(gdat, np.array([fluxsour]), sind=np.array([sindsour]))
if gdat.typeverb > 2:
print('sindsour')
print(sindsour)
else:
specsour = np.array([fluxsour])
if gdat.typeverb > 2:
print('lgalsour')
print(lgalsour)
print('bgalsour')
print(bgalsour)
print('sizesour')
print(sizesour)
print('ellpsour')
print(ellpsour)
print('anglsour')
print(anglsour)
print('fluxsour')
print(fluxsour)
print('specsour')
print(specsour)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.typeverb > 2:
print('Interpolating the background emission...')
sbrt['bgrdgalx'] = retr_sbrtsers(gdat, gdat.lgalgrid[indxpixlelem[0]], gdat.bgalgrid[indxpixlelem[0]], \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
if gdat.typeverb > 2:
print('sbrt[bgrdgalx]')
summgene(sbrt['bgrdgalx'])
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
sbrt['bgrd'] = sbrt['bgrdgalx'] + sbrtextsbgrd
sbrt['lens'] = np.empty_like(gdat.cntpdata)
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
sbrtbgrdobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
sbrt['bgrd'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
bgalprim = gdat.bgalgrid[indxpixlelem[0]] - defl[indxpixlelem[0], 1]
lgalprim = gdat.lgalgrid[indxpixlelem[0]] - defl[indxpixlelem[0], 0]
# temp -- T?
sbrt['lens'][ii, :, m] = sbrtbgrdobjt(bgalprim, lgalprim, grid=False).flatten()
else:
if gdat.typeverb > 2:
print('Not interpolating the background emission...')
sbrt['lens'] = retr_sbrtsers(gdat, gdat.lgalgrid - defl[gdat.indxpixl, 0], \
gdat.bgalgrid - defl[gdat.indxpixl, 1], \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
sbrt['bgrd'] = retr_sbrtsers(gdat, gdat.lgalgrid, \
gdat.bgalgrid, \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
setattr(gmodthis, 'sbrtlens', sbrt['lens'])
if gdat.booldiagmode:
if not np.isfinite(sbrt['lens']).all():
raise Exception('Lensed emission is not finite.')
if (sbrt['lens'] == 0).all():
raise Exception('Lensed emission is zero everywhere.')
stopchro(gdat, gdatmodi, 'sbrtlens')
### background surface brightness
sbrtback = []
# temp
#sbrtback = np.empty((numbback, gdat.numbener, indxpixlelem[yy].size, gdat.numbevtt))
# evaluate host galaxy surface brightness
if gmod.typeemishost != 'none':
initchro(gdat, gdatmodi, 'sbrthost')
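# each host component is evaluated as a Sersic profile; retr_sbrtsers presumably implements
# the standard form I(R) = I_e * exp(-b_n * ((R / R_e)^(1 / n) - 1)) with the position,
# spectrum, size, ellipticity, orientation and Sersic index passed below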
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the host galaxy surface brightness...')
if gdat.numbener > 1:
spechost = retr_spec(gdat, np.array([fluxhost[e]]), sind=np.array([sindhost[e]]))
else:
spechost = np.array([fluxhost[e]])
if gdat.typeverb > 2:
print('lgalhost[e]')
print(lgalhost[e] * gdat.anglfact)
print('bgalhost[e]')
print(bgalhost[e] * gdat.anglfact)
print('spechost')
print(spechost)
print('sizehost[e]')
print(sizehost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
print('serihost[e]')
print(serihost[e])
sbrt['hostisf%d' % e] = retr_sbrtsers(gdat, gdat.lgalgrid, gdat.bgalgrid, lgalhost[e], \
bgalhost[e], spechost, sizehost[e], ellphost[e], anglhost[e], serihost[e])
setattr(gmodstat, 'sbrthostisf%d' % e, sbrt['hostisf%d' % e])
#sbrthost = sbrt['host']
if gdat.typeverb > 2:
for e in gmod.indxsersfgrd:
print('e')
print(e)
print('sbrt[hostisf%d]')
summgene(sbrt['hostisf%d' % e])
stopchro(gdat, gdatmodi, 'sbrthost')
## model emission
initchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('Summing up the model emission...')
sbrt['modlraww'] = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))
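# the raw model is the sum of all diffuse components; each background template is rescaled by
# its normalization parameter(s) in bacp, either a single scalar or one factor per energy bin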
for name in gmod.listnamediff:
if name.startswith('back'):
gmod.indxbacktemp = int(name[4:8])
if gdat.typepixl == 'heal' and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv') and not gmod.boolunifback[gmod.indxbacktemp]:
sbrttemp = getattr(gmod, 'sbrtbackhealfull')[gmod.indxbacktemp]
else:
sbrttemp = gmod.sbrtbacknorm[gmod.indxbacktemp]
if gmod.boolspecback[gmod.indxbacktemp]:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp]]
else:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp][gdat.indxener]][:, None, None]
sbrt['modlraww'] += sbrt[name]
if gdat.booldiagmode:
if np.amax(sbrttemp) == 0.:
raise Exception('')
if gdat.typeverb > 2:
print('name')
print(name)
print('sbrt[name]')
summgene(sbrt[name])
if gdat.typeverb > 2:
for ii, i in enumerate(gdat.indxener):
print('ii, i')
print(ii, i)
for mm, m in enumerate(gdat.indxevtt):
print('mm, m')
print(mm, m)
print('sbrt[modlraww][ii, :, mm]')
summgene(sbrt['modlraww'][ii, :, mm])
# convolve the model with the PSF
if gmod.convdiffanyy and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'):
sbrt['modlconv'] = []
# temp -- isotropic background proposals are unnecessarily entering this clause
if gdat.typeverb > 2:
print('Convolving the model image with the PSF...')
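# Cartesian maps are convolved with the PSF kernel via FFT (convolve_fft, presumably astropy's),
# HEALPix maps via hp.smoothing with the PSF FWHM; a floor of 1e-50 prevents non-positive values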
sbrt['modlconv'] = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
if gdat.strgcnfg == 'pcat_ferm_igal_mock_test':
print('Convolving ii, i, mm, m')
print(ii, i, mm, m)
if gdat.typepixl == 'cart':
if gdat.numbpixl == gdat.numbpixlcart:
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrt['modlraww'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)), \
psfnconv[mm][ii]).flatten()
else:
sbrtfull = np.zeros(gdat.numbpixlcart)
sbrtfull[gdat.indxpixlrofi] = sbrt['modlraww'][ii, :, mm]
sbrtfull = sbrtfull.reshape((gdat.numbsidecart, gdat.numbsidecart))
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrtfull, psfnconv[mm][ii]).flatten()[gdat.indxpixlrofi]
indx = np.where(sbrt['modlconv'][ii, :, mm] < 1e-50)
sbrt['modlconv'][ii, indx, mm] = 1e-50
if gdat.typepixl == 'heal':
sbrt['modlconv'][ii, :, mm] = hp.smoothing(sbrt['modlraww'][ii, :, mm], fwhm=fwhm[i, m])[gdat.indxpixlrofi]
sbrt['modlconv'][ii, :, mm][np.where(sbrt['modlraww'][ii, :, mm] <= 1e-50)] = 1e-50
setattr(gmodstat, 'sbrtmodlconv', sbrt['modlconv'])
# temp -- this could be made faster -- need the copy() statement because sbrtdfnc gets added to sbrtmodl afterwards
sbrt['modl'] = np.copy(sbrt['modlconv'])
else:
if gdat.typeverb > 2:
print('Skipping PSF convolution of the model...')
sbrt['modl'] = np.copy(sbrt['modlraww'])
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
## add PSF-convolved delta functions to the model
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gdat.typeverb > 2:
print('Adding delta functions into the model...')
print('sbrt[dfnc]')
summgene(sbrt['dfnc'])
sbrt['modl'] += sbrt['dfnc']
stopchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
### count map
initchro(gdat, gdatmodi, 'expo')
cntp = dict()
cntp['modl'] = retr_cntp(gdat, sbrt['modl'])
if gdat.booldiagmode:
setattr(gmodstat, 'cntpmodl', cntp['modl'])
stopchro(gdat, gdatmodi, 'expo')
# mock data specific
if strgmodl == 'true' and strgstat == 'this':
# generate count data
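# mock counts are drawn pixel by pixel from a Poisson distribution with mean equal to the model
# count map; the triple loop could equivalently be vectorized as np.random.poisson(cntp['modl'])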
cntptemp = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for i in gdat.indxener:
for j in gdat.indxpixl:
for m in gdat.indxevtt:
cntptemp[i, j, m] = np.random.poisson(cntp['modl'][i, j, m])
setattr(gdat, 'cntpdata', cntptemp)
if not gdat.boolsqzeexpo and np.amax(cntptemp) == 0:
print('cntp[modl]')
summgene(cntp['modl'])
print('gdat.boolsqzeexpo')
print(gdat.boolsqzeexpo)
print('cntptemp')
summgene(cntptemp)
raise Exception('Data is zero.')
proc_cntpdata(gdat)
## diagnostics
if gdat.booldiagmode:
frac = cntp['modl'] / np.mean(cntp['modl'])
if np.amin(frac) < -1e-3 and np.amin(cntp['modl']) < -0.1:
raise Exception('')
indxcubebadd = np.where(cntp['modl'] < 0.)[0]
if indxcubebadd.size > 0:
print('Warning! Model prediction is negative. Correcting to 1e-20...')
cntp['modl'][indxcubebadd] = 1e-20
stopchro(gdat, gdatmodi, 'modl')
# log-prior
initchro(gdat, gdatmodi, 'lpri')
if gdat.typeverb > 2:
print('Evaluating the prior...')
lpri = np.zeros(gmod.numblpri)
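# parsimony prior: each element is penalized by 0.5 * priofactdoff times its number of
# generative parameters, discouraging elements that do not improve the fit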
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
lpri[0] -= 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[l] * gmodstat.numbelem[l]
if gdat.penalpridiff:
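# optional prior penalizing the mismatch between the one-dimensional power spectrum of the
# point-source-subtracted data and a reference spectrum set by the PSF width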
sbrtdatapnts = gdat.sbrtdata - sbrt['dfnc']
if gdat.typepixl == 'heal':
raise Exception('')
if gdat.typepixl == 'cart':
psecodimdatapnts = np.empty((gdat.numbener, gdat.numbsidecarthalf, gdat.numbevtt))
psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
sigm = fwhm / 2.355
psecodimdatapntsprio = np.exp(-2. * gdat.meanpara.mpolodim[None, :, None] / (0.1 / sigm[:, None, :]))
lpridiff = 0.
for i in gdat.indxener:
for m in gdat.indxevtt:
psecdatapnts = retr_psec(gdat, sbrtdatapnts[i, :, m])
psecodimdatapnts[i, :, m] = retr_psecodim(gdat, psecdatapnts)
psecodimdatapnts[i, :, m] /= psecodimdatapnts[i, 0, m]
lpridiff += -0.5 * np.sum((psecodimdatapnts[i, :, m] - psecodimdatapntsprio[i, :, m])**2)
setattr(gmodstat, 'psecodimdatapntsen%02devt%d' % (i, m), psecodimdatapnts[i, :, m])
setattr(gmodstat, 'psecodimdatapntsprioen%02devt%d' % (i, m), psecodimdatapntsprio[i, :, m])
lpri[1] = lpridiff
setattr(gmodstat, 'lpridiff', lpridiff)
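# prior on the number of elements; for the Poisson model retr_lprbpois presumably returns the
# log of the Poisson PMF, ln P(N | mu) = N ln(mu) - mu - ln(N!)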
if gmod.typemodltran == 'pois':
meanelem = gmodstat.paragenrscalfull[gmod.indxpara.meanelem]
for l in gmod.indxpopl:
lpri[2] += retr_lprbpois(gmodstat.numbelem[l], meanelem[l])
for l in gmod.indxpopl:
for g, (strgfeat, strgpdfn) in enumerate(zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l])):
indxlpritemp = 3 + l * gmod.numbparagenrelem + g
lpri[indxlpritemp] = retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, gmodstat.paragenrscalfull, gmodstat.dictelem, gmodstat.numbelem)
lpritotl = np.sum(lpri)
if gdat.typeverb > 1:
print('lpritotl')
print(lpritotl)
### log-likelihood
initchro(gdat, gdatmodi, 'llik')
llik = retr_llik(gdat, strgmodl, cntp['modl'])
if gdat.typeverb > 2:
print('cntp[modl]')
summgene(cntp['modl'])
print('np.sum(cntp[modl], (1, 2))')
print(np.sum(cntp['modl'], (1, 2)))
print('np.sum(gdat.cntpdata, (1, 2))')
print(np.sum(gdat.cntpdata, (1, 2)))
if gdat.booldiagmode:
if not np.isfinite(llik).all():
raise Exception('Likelihood is not finite.')
gmodstat.lliktotl = np.sum(llik)
if gdat.booldiagmode:
if isinstance(gmodstat.lliktotl, np.ndarray):
raise Exception('')
if not np.isfinite(gmodstat.lliktotl).all():
raise Exception('')
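# effective number of degrees of freedom: number of data points minus the fixed-dimensional
# parameters minus the parameters of the elements currently in the model; used for the
# likelihood summaries below and for the reduced chi-square later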
numbdoff = gdat.numbdata - gmod.numbparagenrbase
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
numbdoff -= len(gmodstat.indxparagenrfullelem[l]['full'])
setattr(gmodstat, 'llik', llik)
setattr(gmodstat, 'llikmean', gmodstat.lliktotl / gdat.numbdata)
setattr(gmodstat, 'llikcmea', gmodstat.lliktotl / (gdat.numbdata - numbdoff))
if gdat.typeverb > 2:
print('llik')
summgene(llik)
if gdat.typeverb > 1:
print('gmodstat.lliktotl')
print(gmodstat.lliktotl)
stopchro(gdat, gdatmodi, 'llik')
lpostotl = lpritotl + gmodstat.lliktotl
if gdat.typeverb > 1:
print('lpostotl')
print(lpostotl)
setattr(gmodstat, 'lpritotl', lpritotl)
setattr(gmodstat, 'lliktotl', gmodstat.lliktotl)
setattr(gmodstat, 'lpostotl', lpostotl)
stopchro(gdat, gdatmodi, 'lpri')
if strgstat == 'next':
return
initchro(gdat, gdatmodi, 'tert')
setattr(gmodstat, 'lpri', lpri)
if gmod.numbparaelem > 0:
setattr(gmodstat, 'lpripena', lpri[0])
dicttert = {}
## load necessary variables
## derived variables
## residual count map
cntp['resi'] = []
cntp['resi'] = gdat.cntpdata - cntp['modl']
setattr(gmodstat, 'cntpmodl', cntp['modl'])
setattr(gmodstat, 'cntpresi', cntp['resi'])
setattr(gmodstat, 'llik', llik)
#if gmod.boollens:
# setattr(gmodstat, 'deflhost', deflhost)
if gmod.boollens:
setattr(gmodstat, 'defl', defl)
for e in gmod.indxsersfgrd:
masshostbein = massfrombein * beinhost[e]**2
setattr(gmodstat, 'masshostisf%dbein' % e, masshostbein)
### sort with respect to deflection at scale radius
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
indxelemsortampl = np.argsort(gmodstat.dictelem[l][nameparaelemsort[l]])[::-1]
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem + 'sort'] = gmodstat.dictelem[l][nameparagenrelem][indxelemsortampl]
deflsing = np.zeros((gdat.numbpixlcart, 2, numbdeflsingplot))
conv = np.zeros((gdat.numbpixlcart))
convpsec = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecodim = np.zeros((gdat.numbsidecarthalf))
if gmod.numbparaelem > 0:
if boolelemlens:
gmod.indxpopllens = gmod.typeelem.index('lens')
numbdeflsing = 2
if gmod.numbparaelem > 0:
if boolelemlens:
if numbelem[indxpopllens] > 0:
numbdeflsing += min(numbdeflsubhplot, numbelem[indxpopllens])
numbdeflsing += 1
for k in range(numbdeflsing):
indxpixltemp = gdat.indxpixlcart
if k == 0:
# temp -- should take other sersics into account
deflsing[indxpixltemp, :, k] = deflhost[0]
elif k == 1:
deflsing[indxpixltemp, :, k] = deflextr
elif k == 2:
deflsing[indxpixltemp, :, k] = defl - deflextr - deflhost[0]
else:
asca = gmodstat.dictelem[indxpopllens]['ascasort'][None, k-3]
acut = gmodstat.dictelem[indxpopllens]['acutsort'][None, k-3]
deflsing[listindxpixlelem[indxpopllens][k], :, k] = retr_defl(gdat, listindxpixlelem[indxpopllens][k], \
gmodstat.dictelem[indxpopllens]['lgalsort'][None, k-3], gmodstat.dictelem[indxpopllens]['bgalsort'][None, k-3], \
gmodstat.dictelem[indxpopllens]['defssort'][None, k-3], asca=asca, acut=acut)
# convergence
## total
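# the convergence is derived from the deflection field; retr_conv presumably evaluates
# kappa = 0.5 * (d alpha_x / d x + d alpha_y / d y), i.e. half the divergence of the deflection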
conv[:] = retr_conv(gdat, defl)
convhost = np.zeros((gmod.numbsersfgrd, gdat.numbpixlcart))
for e in gmod.indxsersfgrd:
convhost[e, :] = retr_conv(gdat, deflhost[e])
### power spectrum
#### two dimensional
convpsec[:] = retr_psec(gdat, conv[:])
#### one dimensional
convpsecodim[:] = retr_psecodim(gdat, convpsec[:])
setattr(gmodstat, 'convpsec', convpsec)
setattr(gmodstat, 'convpsecodim', convpsecodim)
setattr(gmodstat, 'conv', conv[...])
for e in gmod.indxsersfgrd:
setattr(gmodstat, 'convisf%d' % e, convhost[e, ...])
## subhalos
if gmod.numbparaelem > 0:
if boolelemlens:
convelem = np.zeros((gdat.numbpixl))
convpsecelem = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecelemodim = np.zeros((gdat.numbsidecarthalf))
### convergence
convelem[:] = retr_conv(gdat, deflsubh)
### power spectrum
##### two dimensional
convpsecelem[:] = retr_psec(gdat, convelem[:])
##### one dimensional
convpsecelemodim[:] = retr_psecodim(gdat, convpsecelem[:])
setattr(gmodstat, 'convpsecelem', convpsecelem)
setattr(gmodstat, 'convpsecelemodim', convpsecelemodim)
setattr(gmodstat, 'convelem', convelem[...])
setattr(gmodstat, 'defl', defl)
### magnification
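# the magnification is the inverse determinant of the lensing Jacobian A = d(beta) / d(theta);
# retr_invm presumably returns that determinant (the inverse magnification), hence the reciprocal below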
magn = np.empty((gdat.numbpixlcart))
histdefl = np.empty((gdat.numbdefl))
if gmod.numbparaelem > 0 and boolelemlens:
histdeflsubh = np.empty((gdat.numbdefl))
deflsingmgtd = np.zeros((gdat.numbpixlcart, numbdeflsingplot))
magn[:] = 1. / retr_invm(gdat, defl)
histdefl[:] = np.histogram(defl, bins=gdat.binspara.defl)[0]
if gmod.numbparaelem > 0:
if boolelemlens:
histdeflsubh[:] = np.histogram(deflsubh, bins=gdat.binspara.deflsubh)[0]
deflsingmgtd[:, :] = np.sqrt(np.sum(deflsing[...]**2, axis=1))
if gmod.numbparaelem > 0:
if boolelemlens:
setattr(gmodstat, 'histdeflsubh', histdeflsubh)
setattr(gmodstat, 'histdefl', histdefl)
setattr(gmodstat, 'magn', magn[...])
setattr(gmodstat, 'deflsing', deflsing[...])
setattr(gmodstat, 'deflsingmgtd', deflsingmgtd[...])
## element related
if gmod.numbparaelem > 0:
if gdat.numbpixl == 1:
for l in gmod.indxpopl:
for k in range(gmodstat.numbelem[l]):
setattr(gmodstat, 'speclinepop%d%04d' % (l, k), gmodstat.dictelem[l]['spec'][:, k])
if gdat.typedata == 'mock' and strgmodl == 'true' and gdat.numbpixl > 1:
gdat.refrlgal = [[] for l in gmod.indxpopl]
gdat.refrbgal = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gdat.refrlgal[l] = np.tile(gmodstat.dictelem[l]['lgal'], [3] + list(np.ones(gmodstat.dictelem[l]['lgal'].ndim, dtype=int)))
gdat.refrbgal[l] = np.tile(gmodstat.dictelem[l]['bgal'], [3] + list(np.ones(gmodstat.dictelem[l]['bgal'].ndim, dtype=int)))
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['per1'] = retr_per1(gmodstat.dictelem[l]['per0'], gmodstat.dictelem[l]['magf'])
if gmod.numbparaelem > 0:
if strgstat == 'this' or gdat.boolrefeforc and strgmodl == 'fitt':
# correlate the fitting model elements with the reference elements
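# the association is a greedy nearest-neighbor match: build the matrix of angular (or line-energy)
# distances between reference and fitting elements, keep pairs closer than anglassc, sort by
# distance and accept pairs one by one, never reusing an element on either side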
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
indxelemrefrasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] == 0:
continue
indxelemfittmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
indxelemrefrmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
matrdist = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]))
for k in range(gmodstat.numbelem[l]):
# construct a matrix of angular distances between reference and fitting elements
if gmod.typeelem[l].startswith('lghtline'):
matrdist[:, k] = abs(gdat.refrelin[q][0, :] - gmodstat.dictelem[l]['elin'][k]) / gdat.refrelin[q][0, :]
else:
matrdist[:, k] = retr_angldist(gdat, gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k])
indxelemrefrmatr[:, k] = np.arange(gdat.refr.numbelem[q])
indxelemfittmatr[:, k] = k
matrdist = matrdist.flatten()
indxelemrefrmatr = indxelemrefrmatr.flatten()
indxelemfittmatr = indxelemfittmatr.flatten()
# take only angular separations smaller than some threshold
indxmatrthrs = np.where(matrdist < gdat.anglassc)
matrdist = matrdist[indxmatrthrs]
indxelemrefrmatr = indxelemrefrmatr[indxmatrthrs]
indxelemfittmatr = indxelemfittmatr[indxmatrthrs]
# sort the remaining associations with respect to distance
indxmatrsort = np.argsort(matrdist)
matrdist = matrdist[indxmatrsort]
indxelemrefrmatr = indxelemrefrmatr[indxmatrsort]
indxelemfittmatr = indxelemfittmatr[indxmatrsort]
for c in range(matrdist.size):
if indxelemrefrmatr[c] in indxelemrefrasschits[q][l] or indxelemfittmatr[c] in indxelemfittasschits[q][l]:
continue
indxelemrefrasschits[q][l].append(indxelemrefrmatr[c])
indxelemfittasschits[q][l].append(indxelemfittmatr[c])
indxelemrefrasschits[q][l] = np.array(indxelemrefrasschits[q][l])
indxelemfittasschits[q][l] = np.array(indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasschits', indxelemrefrasschits)
setattr(gmodstat, 'indxelemfittasschits', indxelemfittasschits)
indxelemrefrasscmiss = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasscfals = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
# indices of the reference elements not associated with the fitting model elements
if gdat.refr.numbelem[q] > 0:
indxelemrefrasscmiss[q][l] = np.setdiff1d(np.arange(gdat.refr.numbelem[q]), indxelemrefrasschits[q][l])
# indices of the fitting model elements not associated with the reference elements
if gmodstat.numbelem[l] > 0:
indxelemfittasscfals[q][l] = np.setdiff1d(np.arange(gmodstat.numbelem[l]), indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasscmiss', indxelemrefrasscmiss)
setattr(gmodstat, 'indxelemfittasscfals', indxelemfittasscfals)
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for l in gmod.indxpopl:
# collect the associated reference element parameter for each fitting element
for strgfeat in gdat.refr.namepara.elemonly[q][l]:
name = strgfeat + gdat.listnamerefr[q]
if strgfeat != 'spec' and strgfeat != 'specplot':
refrfeat = getattr(gdat.refr, strgfeat)
gmodstat.dictelem[l][name] = np.zeros(gmodstat.numbelem[l])
if len(refrfeat[q]) > 0 and len(indxelemrefrasschits[q][l]) > 0:
gmodstat.dictelem[l][name][indxelemfittasschits[q][l]] = refrfeat[q][0, indxelemrefrasschits[q][l]]
print('temp')
continue
# collect the error in the associated reference element amplitude
for strgfeat in gdat.listnameparaetotlelemcomm[q][l]:
refrfeat = getattr(gdat.refr, strgfeat)
if strgfeat == gmod.nameparagenrelemampl[l] and len(indxelemfittasschits[q][l]) > 0:
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]] = np.zeros(gmodstat.numbelem[l])
fittfeattemp = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
refrfeattemp = refrfeat[q][0, indxelemrefrasschits[q][l]]
if gdat.booldiagmode:
if not np.isfinite(refrfeattemp).all():
raise Exception('')
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]][indxelemfittasschits[q][l]] = 100. * (fittfeattemp - refrfeattemp) / refrfeattemp
if gdat.boolrefeforc and strgmodl == 'fitt':
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat in gdat.refr.namepara.elem[gdat.indxrefrforc[l]]:
if len(indxelemrefrasschits[gdat.indxrefrforc[l]][l]) == 0:
continue
refrfeat = getattr(gdat.refr, strgfeat)[gdat.indxrefrforc[l]][0, indxelemrefrasschits[gdat.indxrefrforc[l]][l]]
if len(gmodstat.dictelem[l][strgfeat]) == 0:
continue
lpritotl += -2. * np.sum(1e6 * (gmodstat.dictelem[l][strgfeat][indxelemfittasschits[gdat.indxrefrforc[l]][l]] - refrfeat)**2 / refrfeat**2)
# other tertiary variables continues
## number of degrees of freedom
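# reduced chi-square of the residual count map, chi2doff = sum(resi^2 / varidata) / numbdoff,
# with numbdoff computed above from the data size and the current number of model parameters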
chi2doff = np.sum(cntp['resi']**2 / gdat.varidata) / numbdoff
if gdat.booldiagmode:
if not np.isfinite(cntp['resi']).all():
raise Exception('')
if not np.isfinite(numbdoff):
raise Exception('')
if not np.isfinite(chi2doff):
raise Exception('')
setattr(gmodstat, 'numbdoff', numbdoff)
setattr(gmodstat, 'chi2doff', chi2doff)
if gmod.boolelempsfn and gmod.numbparaelem > 0:
gmodstat.fwhmpsfn = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gmod.numbparaelem > 0:
### derived parameters
for l in gmod.indxpopl:
# luminosity
if gmod.boolelemlght[l] and 'flux' in gmod.namepara.genrelem[l]:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat.startswith('reds') and strgfeat != 'reds':
namerefr = strgfeat[-4:]
gmodstat.dictelem[l]['lumi' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
gmodstat.dictelem[l]['dlos' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
reds = gmodstat.dictelem[l]['reds' + namerefr]
indxgood = np.where(np.isfinite(gmodstat.dictelem[l]['reds' + namerefr]))[0]
if indxgood.size > 0:
# temp -- these units only work for energy units of keV
dlos = gdat.adisobjt(reds)
gmodstat.dictelem[l]['dlos' + namerefr][indxgood] = dlos
lumi = retr_lumi(gdat, gmodstat.dictelem[l]['flux'], dlos, reds)
gmodstat.dictelem[l]['lumi' + namerefr][indxgood] = lumi
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['mass'] = np.full([numbelem[l]], 3.)
if gdat.typeverb > 2:
print('l')
print(l)
if gdat.boolbinsspat:
#### radial and angular coordinates
gmodstat.dictelem[l]['gang'] = retr_gang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
gmodstat.dictelem[l]['aang'] = retr_aang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
if gmod.boolelemlght[l]:
#### number of expected counts
if gdat.boolbinsspat:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal']], gmodstat.dictelem[l]['spec'])
else:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['elin']], gmodstat.dictelem[l]['spec'])
#### delta log-likelihood
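# for each element, deltllik is the drop in the total log-likelihood when that element is
# removed: a deterministic death proposal is applied to a temporary state, the likelihood is
# reevaluated, and deltllik = lliktotl(current) - lliktotl(without element k)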
gmodstat.dictelem[l]['deltllik'] = np.zeros(gmodstat.numbelem[l])
if not (strgmodl == 'true' and gdat.checprio):
if gdat.typeverb > 2:
print('Calculating log-likelihood differences when removing elements from the model.')
for k in range(gmodstat.numbelem[l]):
# construct gdatmodi
gdatmoditemp = tdpy.gdatstrt()
gdatmoditemp.this = tdpy.gdatstrt()
gdatmoditemp.next = tdpy.gdatstrt()
gdatmoditemp.this.indxelemfull = gmodstat.indxelemfull
gdatmoditemp.this.paragenrscalfull = gmodstat.paragenrscalfull
gdatmoditemp.this.paragenrunitfull = gmodstat.paragenrunitfull
prop_stat(gdat, gdatmoditemp, strgmodl, deth=True, thisindxpopl=l, thisindxelem=k)
proc_samp(gdat, gdatmoditemp, 'next', strgmodl)#, boolinit=boolinit)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.lliktotl):
raise Exception('')
gdatobjttemp = retr_gdatobjt(gdat, gdatmoditemp, strgmodl)#, boolinit=boolinit)
nextlliktotl = gdatobjttemp.next.lliktotl
gmodstat.dictelem[l]['deltllik'][k] = gmodstat.lliktotl - nextlliktotl
if gdat.typeverb > 2:
print('deltllik calculation ended.')
# more derived parameters
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
### PSF FWHM
if gdat.typepixl == 'cart':
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
setattr(gmodstat, 'fwhm', fwhm)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gmod.numbparaelem > 0:
sbrt['dfnctotl'] = np.zeros_like(gdat.expo)
sbrt['dfncsubt'] = np.zeros_like(gdat.expo)
sbrt['dfncsupt'] = np.zeros_like(gdat.expo)
for l in gmod.indxpopl:
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'] = np.zeros_like(gdat.expo)
if gmod.boolelemsbrt[l]:
for k in range(gmodstat.numbelem[l]):
# read normalization from the element dictionary
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
# calculate imprint on the element surface brightness state variable
if gmod.boolelempsfn[l]:
sbrttemp = retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
indxpixltemp = listindxpixlelem[l][k]
if gmod.typeelem[l].startswith('lghtline'):
sbrttemp = gmodstat.dictelem[l]['spec'][:, k, None, None]
# add it to the state variable depending on the significance
sbrt['dfnctotl'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] > 35:
sbrt['dfncsupt'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] < 35:
sbrt['dfncsubt'][:, indxpixltemp, :] += sbrttemp
# calculate imprint without PSF truncation to calculate approximation errors
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'][:, :, :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, gdat.indxpixl)
setattr(gmodstat, 'sbrtdfncsubtpop%d' % l, sbrt['dfncsubt'])
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.booldiagmode:
numbtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtextsbgrd[l]:
numbtemp += np.sum(gmodstat.numbelem[l])
if numbtemp > 0 and (sbrtextsbgrd == 0.).all():
raise Exception('')
sbrt['bgrdexts'] = sbrtextsbgrd
#### count maps
cntp = dict()
for name in gmod.listnamegcom:
cntp[name] = retr_cntp(gdat, sbrt[name])
setattr(gmodstat, 'cntp' + name, cntp[name])
### spatial averages
sbrtmean = dict()
sbrtstdv = dict()
for name in gmod.listnamegcom:
sbrtmean[name], sbrtstdv[name] = retr_spatmean(gdat, sbrt[name])
for b in gdat.indxspatmean:
setattr(gmodstat, 'sbrt%smea%d' % (name, b), sbrtmean[name][b])
setattr(gmodstat, 'sbrt%sstd%d' % (name, b), sbrtstdv[name][b])
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
for i in gdat.indxener:
if 'dark' in gmod.listnamegcom:
fracsdenmeandarkdfncsubt = sbrtmean['dfncsubt'][0][0][i] / (sbrtmean['dfncsubt'][0][0][i] + sbrtmean['dark'][0][0][i])
else:
fracsdenmeandarkdfncsubt = 1.
setattr(gmodstat, 'fracsdenmeandarkdfncsubten%02d' % i, np.array([fracsdenmeandarkdfncsubt]))
if 'dark' in gmod.listnamegcom:
booldfncsubt = float(np.where(sbrtmean['dfncsubt'][0][0] > sbrtmean['dark'][0][0])[0].any())
else:
booldfncsubt = 1.
setattr(gmodstat, 'booldfncsubt', np.array([booldfncsubt]))
# find the 1-point function of the count maps of all emission components including the total emission
for name in gmod.listnamegcom:
namehistcntp = 'histcntp' + name
for m in gdat.indxevtt:
if gdat.numbevtt > 1:
namehistcntp += 'evt%d' % m
for i in gdat.indxener:
if gdat.numbener > 1:
namehistcntp += 'en%02d' % i
histcntp = np.histogram(cntp[name][i, :, m], bins=gdat.binspara.cntpmodl)[0]
setattr(gmodstat, namehistcntp, histcntp)
if False and i == 0 and m == 0 and (name == 'dfnc' or name == 'dfncsubt'):
for strgbins in ['lowr', 'higr']:
strgtemp = 'histcntp' + strgbins + name + 'en%02devt%d' % (i, m)
if strgbins == 'lowr':
setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[:gdat.numbtickcbar-1]))]))
else:
setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[gdat.numbtickcbar-1:]))]))
else:
histcntp = np.histogram(cntp[name][:, 0, m], bins=gdat.binspara.cntpmodl)[0]
setattr(gmodstat, 'histcntp' + name + 'evt%d' % m, histcntp)
if gmod.boollens:
if strgmodl == 'true':
s2nr = []
s2nr = cntp['lens'] / np.sqrt(cntp['modl'])
setattr(gmodstat, 's2nr', s2nr)
cntplensgrad = np.empty((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt, 2))
for i in gdat.indxener:
for m in gdat.indxevtt:
cntplenstemp = np.zeros(gdat.numbpixlcart)
cntplenstemp[gdat.indxpixlrofi] = cntp['lens'][i, :, m]
cntplensgrad[i, :, m, :] = retr_gradmaps(gdat, cntplenstemp) * gdat.sizepixl
cntplensgradmgtd = np.sqrt(np.sum(cntplensgrad**2, axis=3))
cntplensgrad *= gdat.sizepixl
indx = np.where(np.fabs(cntplensgrad) > 1. * gdat.sizepixl)
cntplensgrad[indx] = np.sign(cntplensgrad[indx]) * 1. * gdat.sizepixl
deflmgtd = np.sqrt(np.sum(defl**2, axis=1))
setattr(gmodstat, 'deflmgtd', deflmgtd)
setattr(gmodstat, 'cntplensgrad', cntplensgrad)
setattr(gmodstat, 'cntplensgradmgtd', cntplensgradmgtd)
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
#### spectra
if gdat.boolbinsspat:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['specplot'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], \
curv=gmodstat.dictelem[l]['curv'], expc=gmodstat.dictelem[l]['expc'], \
sindcolr=sindcolr, spectype=gmod.spectype[l], plot=True)
if gdat.typedata == 'inpt':
if gdat.typeexpr == 'ferm':
# temp
try:
gmodstat.dictelem[l]['sbrt0018'] = gdat.sbrt0018objt(gmodstat.dictelem[l]['bgal'], gmodstat.dictelem[l]['lgal'])
except:
gmodstat.dictelem[l]['sbrt0018'] = gmodstat.dictelem[l]['bgal'] * 0.
if gmod.typeelem[l] == 'lens':
#### distance to the source
if gmod.boollens:
gmodstat.dictelem[l]['diss'] = retr_angldist(gdat, gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], lgalsour, bgalsour)
if gmod.boollenssubh:
gmodstat.dictelem[l]['deflprof'] = np.empty((gdat.numbanglfull, gmodstat.numbelem[l]))
gmodstat.dictelem[l]['mcut'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['rele'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reln'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relk'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relf'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reld'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relc'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relm'] = np.empty(gmodstat.numbelem[l])
# temp -- this can be placed earlier in the code
cntplensobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
cntp['lens'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
for k in np.arange(gmodstat.numbelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
#### deflection profiles
gmodstat.dictelem[l]['deflprof'][:, k] = retr_deflcutf(gdat.meanpara.anglfull, gmodstat.dictelem[l]['defs'][k], asca, acut)
### truncated mass
gmodstat.dictelem[l]['mcut'][k] = retr_mcut(gdat, gmodstat.dictelem[l]['defs'][k], asca, acut, adishost, mdencrit)
#### dot product with the source flux gradient
# temp -- weigh the energy and PSF bins
gmodstat.dictelem[l]['rele'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relf'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, cntpmodl=cntp['modl'][0, :, 0])
deflelem = retr_defl(gdat, gdat.indxpixl, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], gmodstat.dictelem[l]['defs'][k], asca=asca, acut=acut)
bgalprim = gdat.bgalgrid - deflelem[:, 1]
lgalprim = gdat.lgalgrid - deflelem[:, 0]
gmodstat.dictelem[l]['relm'][k] = np.mean(abs(cntp['lens'][0, :, 0] - cntplensobjt(bgalprim, lgalprim, grid=False).flatten()))
gmodstat.dictelem[l]['relk'][k] = gmodstat.dictelem[l]['relm'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reln'][k] = gmodstat.dictelem[l]['rele'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reld'][k] = retr_rele(gdat, gdat.cntpdata[0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relc'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, absv=False) / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
### distribution of element parameters and features
#### calculate the model filter
listindxelemfilt = [[[] for l in gmod.indxpopl] for namefilt in gdat.listnamefilt]
for k, namefilt in enumerate(gdat.listnamefilt):
for l in gmod.indxpopl:
if namefilt == '':
listindxelemfilt[k][l] = np.arange(gmodstat.numbelem[l])
if namefilt == 'imagbndr':
listindxelemfilt[k][l] = np.where((np.fabs(gmodstat.dictelem[l]['lgal']) < gdat.maxmgangdata) & (np.fabs(gmodstat.dictelem[l]['bgal']) < gdat.maxmgangdata))[0]
if namefilt == 'deltllik':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['deltllik'] > 0.5 * gmod.numbparagenrelemsing[l])[0]
if namefilt == 'nrel':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['reln'] > 0.3)[0]
for l in gmod.indxpopl:
# histograms of element parameters
for namefrst in gmod.namepara.elem[l]:
## one dimensional
if namefrst[:-4] == 'etag':
continue
if namefrst == 'specplot' or namefrst == 'deflprof':
continue
elif namefrst == 'spec':
histfrst = np.zeros((gdat.numbbinsplot, gdat.numbener))
for i in gdat.indxener:
histfrst[:, i] = np.histogram(gmodstat.dictelem[l]['spec'][i, listindxelemfilt[0][l]], gdat.binspara.spec)[0]
elif namefrst == 'cnts':
histfrst = np.histogram(gmodstat.dictelem[l]['cnts'][listindxelemfilt[0][l]], gdat.binspara.cnts)[0]
else:
#elif not (namefrst == 'curv' and gmod.spectype[l] != 'curv' or namefrst == 'expc' \
# and gmod.spectype[l] != 'expc' or namefrst.startswith('sindarry') and \
# gmod.spectype[l] != 'colr'):
binsfrst = getattr(gdat.binspara, namefrst)
#if len(gmodstat.dictelem[l][namefrst]) > 0 and len(listindxelemfilt[0][l]) > 0:
histfrst = np.histogram(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], binsfrst)[0]
strgvarb = 'hist' + namefrst + 'pop%d' % l
setattr(gmodstat, strgvarb, histfrst)
#### two dimensional
for nameseco in gmod.namepara.elem[l]:
if namefrst == 'spec' or namefrst == 'specplot' or namefrst == 'deflprof' or \
nameseco == 'spec' or nameseco == 'specplot' or nameseco == 'deflprof':
continue
if not checstrgfeat(namefrst, nameseco):
continue
binsseco = getattr(gdat.binspara, nameseco)
histtdim = np.histogram2d(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], \
gmodstat.dictelem[l][nameseco][listindxelemfilt[0][l]], [binsfrst, binsseco])[0]
setattr(gmodstat, 'hist' + namefrst + nameseco + 'pop%d' % l, histtdim)
### priors on element parameters and features
for nameparagenrelem in gmod.namepara.genrelem[l]:
xdat = gmodstat.dictelem[l][nameparagenrelem]
minm = getattr(gmod.minmpara, nameparagenrelem + 'pop%d' % l)
maxm = getattr(gmod.maxmpara, nameparagenrelem + 'pop%d' % l)
scal = getattr(gmod.scalpara, nameparagenrelem + 'pop%d' % l)
booltemp = False
if scal.startswith('expo') or scal.startswith('dexp'):
if scal.startswith('expo'):
if scal == 'expo':
sexp = getattr(gmod, 'gangdistsexppop%d' % l)
else:
sexp = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
pdfn = pdfn_expo(xdat, maxm, sexp)
if scal.startswith('dexp'):
pdfn = pdfn_dexp(xdat, maxm, scal)
booltemp = True
if scal.startswith('self') or scal.startswith('logt'):
if scal.startswith('self'):
pdfn = 1. / (maxm - minm) + np.zeros_like(xdat)
else:
pdfn = 1. / (np.log(maxm) - np.log(minm)) + np.zeros_like(xdat)
booltemp = True
# temp
if scal.startswith('powr'):
slop = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem + 'pop%d' % l)]
pdfn = pdfn_powr(xdat, minm, maxm, slop)
booltemp = True
if scal.startswith('dpowslopbrek'):
pdfn = pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr)
booltemp = True
if scal == 'lnormeanstdv':
pdfn = pdfn_lnor(xdat, meanlnor, stdvlnor)
booltemp = True
if scal.startswith('igam'):
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
pdfn = pdfn_igam(xdat, slop, cutf)
booltemp = True
if scal.startswith('gaus'):
# this does not work for mismodeling
meanvarb = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
stdv = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
if nameparagenrelem == 'expc' and gmod.spectype[l] == 'expc':
pdfn = pdfn_gaus(xdat, meanvarb, stdv)
else:
pdfn = pdfn_gaus(xdat, meanvarb, stdv)
booltemp = True
# temp -- meanelem will not be defined
#if booltemp:
# gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'] = gmodstat.numbelem[l] * pdfn * np.interp(xdat, xdatplot, delt)
#setattr(gmodstat, 'hist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
#if strgmodl == 'true':
# setattr(gmodstat, 'refrhist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
if gmodstat.numbelem[l] > 0:
## total truncated mass of the subhalo as a cross check
# temp -- generalize
asca = gmodstat.dictelem[l]['asca']
acut = gmodstat.dictelem[l]['acut']
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
masssubh = np.array([np.sum(factmcutfromdefs * gmodstat.dictelem[l]['defs'])])
## derived variables as a function of other derived variables
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpntspuls'):
massshel = np.empty(gdat.numbanglhalf)
for k in gdat.indxanglhalf:
indxelemshel = np.where((gdat.binspara.anglhalf[k] < gmodstat.dictelem[l]['gang']) & (gmodstat.dictelem[l]['gang'] < gdat.binspara.anglhalf[k+1]))
massshel[k] = np.sum(gmodstat.dictelem[l]['mass'][indxelemshel])
setattr(gmodstat, 'massshelpop%d' % l, massshel)
if gmod.boollens or gmod.numbparaelem > 0 and gmod.boollenssubh:
# find the host, subhalo masses and subhalo mass fraction as a function of halo-centric radius
listnametemp = gdat.liststrgcalcmasssubh
listnamevarbmass = []
listnamevarbmassscal = []
listnamevarbmassvect = []
for e in gmod.indxsersfgrd:
if boollenshost:
listnamevarbmassscal += ['masshosttotl']
for strgtemp in listnametemp:
listnamevarbmassvect.append('masshostisf%d' % e + strgtemp)
listnamevarbmassscal.append('masshostisf%d' % e + strgtemp + 'bein')
if gmod.numbparaelem > 0 and gmod.boollenssubh:
listnamevarbmassscal.append('masssubhtotl')
listnamevarbmassscal.append('fracsubhtotl')
for strgtemp in listnametemp:
listnamevarbmassvect.append('masssubh' + strgtemp)
listnamevarbmassvect.append('fracsubh' + strgtemp)
listnamevarbmassscal.append('masssubh' + strgtemp + 'bein')
listnamevarbmassscal.append('fracsubh' + strgtemp + 'bein')
for name in listnamevarbmassvect:
dicttert[name] = np.zeros(gdat.numbanglhalf)
if 'isf' in name:
indxisfrtemp = int(name.split('isf')[1][0])
angl = np.sqrt((gdat.meanpara.lgalcartmesh - lgalhost[indxisfrtemp])**2 + (gdat.meanpara.bgalcartmesh - bgalhost[indxisfrtemp])**2).flatten()
for k in gdat.indxanglhalf:
if name[4:8] == 'host':
convtemp = conv[:]
if name[4:8] == 'subh':
convtemp = convelem[:]
if name.endswith('delt'):
indxpixl = np.where((gdat.binspara.anglhalf[k] < angl) & (angl < gdat.binspara.anglhalf[k+1]))[0]
dicttert[name][k] = 1e6 * np.sum(convtemp[indxpixl]) * mdencrit * \
gdat.apix * adishost**2 / 2. / np.pi * gdat.deltanglhalf[k] / gdat.meanpara.anglhalf[k]
if name.endswith('intg'):
indxpixl = np.where(angl < gdat.meanpara.anglhalf[k])[0]
dicttert[name][k] = np.sum(convtemp[indxpixl]) * mdencrit * gdat.apix * adishost**2
if name[:4] == 'frac':
masshosttotl = 0.
for e in gmod.indxsersfgrd:
masshosttotl += dicttert['masshostisf%d' % e + name[-4:]][k]
if masshosttotl != 0.:
dicttert['fracsubh' + name[8:]][k] = dicttert['masssubh' + name[8:]][k] / masshosttotl
setattr(gmodstat, name, dicttert[name])
# interpolate the host, subhalo masses and subhalo mass fraction at the Einstein radius and save it as a scalar variable
dicttert[name + 'bein'] = np.interp(beinhost, gdat.meanpara.anglhalf, dicttert[name])
setattr(gmodstat, name + 'bein', dicttert[name + 'bein'])
#if gmod.numbparaelem > 0:
# ## copy element parameters to the global object
# feat = [[] for l in gmod.indxpopl]
# for l in gmod.indxpopl:
# feat[l] = dict()
# for strgfeat in gmod.namepara.genrelem[l]:
# if strgfeat[:-4] == 'etag':
# continue
# if len(gmodstat.dictelem[l][strgfeat]) > 0:
# if strgmodl == 'true':
# shap = list(np.ones(gmodstat.dictelem[l][strgfeat].ndim, dtype=int))
# feat[l][strgfeat] = np.tile(gmodstat.dictelem[l][strgfeat], [3] + shap)
# if strgmodl == 'fitt':
# feat[l][strgfeat] = gmodstat.dictelem[l][strgfeat]
#
# #for strgfeat in gmod.namepara.elem:
# # feattemp = [[] for l in gmod.indxpopl]
# # for l in gmod.indxpopl:
# # if strgfeat in gmod.namepara.genrelem[l]:
# # if strgfeat in feat[l]:
# # feattemp[l] = feat[l][strgfeat]
# # else:
# # feattemp[l] = np.array([])
# # setattr(gmodstat, strgfeat, feattemp)
# copy true state to the reference state
#if strgmodl == 'true':
# for name, valu in deepcopy(gdat.__dict__).items():
# if name.startswith('true'):
# #indx = name.find('pop')
# #if indx != -1 and not name.endswith('pop') and name[indx+3].isdigit():
# # namerefr = name.replace('pop%s' % name[indx+3], 'ref%s' % name[indx+3])
# #else:
# # namerefr = name
# #namerefr = name
# #namerefr = namerefr.replace('true', 'refr')
# name = name.replace('true', 'refr')
# setattr(gdat, name, valu)
if gmod.numbparaelem > 0 and gdat.priofactdoff != 0.:
if strgmodl == 'true':
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':
continue
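# recall (reca) per bin of a true-element feature: the fraction of true elements in the bin whose
# removal changes the log-likelihood by more than 2.5; fitting-model histograms are later divided
# by this curve (histcorrreca) to correct for incompleteness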
reca = np.zeros(gdat.numbbinsplot) - 1.
indxelempars = np.where(gmodstat.dictelem[q]['deltllik'] > 2.5)[0]
refrhistpars = np.zeros(gdat.numbbinsplot) - 1.
histparaelem = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
indxrefrgood = np.where(histparaelem > 0)[0]
reca[indxrefrgood] = 0.
refrhistpars[indxrefrgood] = 0.
refrhist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
bins = getattr(gdat.binspara, strgfeat)
if len(indxelempars) > 0:
refrhistpars = np.histogram(gmodstat.dictelem[q][strgfeat][indxelempars], bins=bins)[0].astype(float)
if indxrefrgood.size > 0:
reca[indxrefrgood] = refrhistpars[indxrefrgood] / refrhist[indxrefrgood]
setattr(gmodstat, 'histpars' + strgfeat + 'pop%d' % q, refrhistpars)
setattr(gmodstat, 'reca' + strgfeat + 'pop%d' % q, reca)
print('gdat.rtagmock')
print(gdat.rtagmock)
if gdat.rtagmock is not None:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':# or strgfeat.startswith('aerr'):
continue
if strgfeat in gmod.namepara.genrelem[l]:
hist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % l)
reca = getattr(gdat.true.this, 'reca' + strgfeat + 'pop%d' % l)
histcorrreca = hist / reca
setattr(gmodstat, 'histcorrreca' + strgfeat + 'pop%d' % l, histcorrreca)
### Exclusive comparison with the true state
if strgmodl == 'fitt' and gdat.typedata == 'mock':
if gmod.boollens:
numbsingcomm = min(deflsing.shape[2], gmod.deflsing.shape[2])
deflsingresi = deflsing[0, ..., :numbsingcomm] - gmod.deflsing[..., :numbsingcomm]
deflsingresimgtd = np.sqrt(np.sum(deflsingresi**2, axis=1))
deflsingresiperc = 100. * deflsingresimgtd / gmod.deflsingmgtd[..., :numbsingcomm]
setattr(gmodstat, 'numbsingcomm', numbsingcomm)
setattr(gmodstat, 'deflsingresi', deflsingresi)
truedeflmgtd = getattr(gdat.true.this, 'deflmgtd')
truedefl = getattr(gdat.true.this, 'defl')
deflresi = defl - truedefl
deflresimgtd = np.sqrt(np.sum(deflresi**2, axis=1))
deflresiperc = 100. * deflresimgtd / truedeflmgtd
setattr(gmodstat, 'deflresi', deflresi)
setattr(gmodstat, 'deflresimgtd', deflresimgtd)
if gmod.numbparaelem > 0:
trueconvelem = getattr(gdat.true.this, 'convelem')
convelemresi = convelem[:] - trueconvelem
convelemresiperc = 100. * convelemresi / trueconvelem
setattr(gmodstat, 'convelemresi', convelemresi)
setattr(gmodstat, 'convelemresiperc', convelemresiperc)
truemagn = getattr(gdat.true.this, 'magn')
magnresi = magn[:] - truemagn
magnresiperc = 100. * magnresi / truemagn
setattr(gmodstat, 'magnresi', magnresi)
setattr(gmodstat, 'magnresiperc', magnresiperc)
if gmod.numbparaelem > 0:
# correlate the catalog sample with the reference catalog
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] > 0:
cmpl = np.array([float(len(indxelemrefrasschits[q][l])) / gdat.refr.numbelem[q]])
if gdat.booldiagmode:
if cmpl > 1. or cmpl < 0.:
raise Exception('')
else:
cmpl = np.array([-1.])
setattr(gmodstat, 'cmplpop%dpop%d' % (l, q), cmpl)
if gmodstat.numbelem[l] > 0:
fdis = np.array([float(indxelemfittasscfals[q][l].size) / gmodstat.numbelem[l]])
if gdat.booldiagmode:
if fdis > 1. or fdis < 0.:
raise Exception('')
else:
fdis = np.array([-1.])
setattr(gmodstat, 'fdispop%dpop%d' % (q, l), fdis)
# collect the associated fitting element parameter for each reference element
featrefrassc = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
featrefrassc[q][l] = dict()
for strgfeat in gdat.refr.namepara.elem[q]:
if not strgfeat in gmod.namepara.genrelem[l] or strgfeat in gdat.refr.namepara.elemonly[q][l]:
continue
if isinstance(gmodstat.dictelem[l][strgfeat], np.ndarray) and gmodstat.dictelem[l][strgfeat].ndim > 1:
continue
featrefrassc[q][l][strgfeat] = np.zeros(gdat.refr.numbelem[q]) + np.nan
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][strgfeat]) > 0:
featrefrassc[q][l][strgfeat][indxelemrefrasschits[q][l]] = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
name = strgfeat + 'asscpop%dpop%d' % (q, l)
setattr(gmodstat, name, featrefrassc[q][l][strgfeat])
# completeness
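# completeness per bin of a reference feature: the number of reference elements matched to a
# fitting element divided by the total number of reference elements in that bin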
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
l = gdat.refr.indxpoplfittassc[q]
for nameparaelemfrst in gdat.refr.namepara.elem[q]:
if nameparaelemfrst.startswith('etag'):
continue
if nameparaelemfrst == 'spec' or nameparaelemfrst == 'specplot':
continue
refrfeatfrst = gdat.refr.dictelem[q][nameparaelemfrst][0, :]
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gdat.refr.namepara.elem[q]:
if nameparaelemfrst == nameparaelemseco:
continue
if nameparaelemseco.startswith('etag'):
continue
if nameparaelemseco == 'spec' or nameparaelemseco == 'specplot':
continue
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the cmpl array should depend on strgmodl
cmpltdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot)) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeattdim = getattr(gdat.refr, 'hist%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q))
refrfeatseco = gdat.refr.dictelem[q][nameparaelemseco][0, :]
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
refrhistfeattdimassc = np.histogram2d(refrfeatfrst[indxelemrefrasschits[q][l]], \
refrfeatseco[indxelemrefrasschits[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(refrhistfeattdim != 0.)
if indxgood[0].size > 0:
cmpltdim[indxgood] = refrhistfeattdimassc[indxgood].astype(float) / refrhistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((cmpltdim[indxgood] > 1.) | (cmpltdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q), cmpltdim)
cmplfrst = np.zeros(gdat.numbbinsplot) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeatfrst = getattr(gdat.refr, 'hist' + nameparaelemfrst + 'pop%d' % q)
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
refrhistfeatfrstassc = np.histogram(refrfeatfrst[indxelemrefrasschits[q][l]], bins=binsfeatfrst)[0]
indxgood = np.where(refrhistfeatfrst != 0.)[0]
if indxgood.size > 0:
cmplfrst[indxgood] = refrhistfeatfrstassc[indxgood].astype(float) / refrhistfeatfrst[indxgood]
if gdat.booldiagmode:
if np.where((cmplfrst[indxgood] > 1.) | (cmplfrst[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%spop%d' % (nameparaelemfrst, q), cmplfrst)
# false discovery rate
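# false discovery rate per bin of a fitting feature: the number of fitting elements without a
# reference counterpart divided by the total number of fitting elements in that bin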
for l in gmod.indxpopl:
q = gmod.indxpoplrefrassc[l]
for nameparaelemfrst in gmod.namepara.elem[l]:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gmod.namepara.elem[l]:
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the fdis array should depend on strgmodl
fdistdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot))
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemseco]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
strgfeattdim = nameparaelemfrst + nameparaelemseco + 'pop%d' % l
fitthistfeattdim = getattr(gmodstat, 'hist' + strgfeattdim)
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
fitthistfeattdimfals = np.histogram2d(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], \
gmodstat.dictelem[l][nameparaelemseco][indxelemfittasscfals[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(fitthistfeattdim != 0.)
if indxgood[0].size > 0:
fdistdim[indxgood] = fitthistfeattdimfals[indxgood].astype(float) / fitthistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((fdistdim[indxgood] > 1.) | (fdistdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'fdis%s%spop%d' % (nameparaelemfrst, nameparaelemseco, l), fdistdim)
fdisfrst = np.zeros(gdat.numbbinsplot)
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
fitthistfeatfrstfals = np.histogram(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], bins=binsfeatfrst)[0]
fitthistfeatfrst = getattr(gmodstat, 'hist' + nameparaelemfrst + 'pop%d' % l)
indxgood = np.where(fitthistfeatfrst != 0.)[0]
if indxgood.size > 0:
fdisfrst[indxgood] = fitthistfeatfrstfals[indxgood].astype(float) / fitthistfeatfrst[indxgood]
if gdat.booldiagmode:
if np.where((fdisfrst[indxgood] > 1.) | (fdisfrst[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'fdis%spop%d' % (nameparaelemfrst, l), fdisfrst)
# temp
if strgmodl == 'true' and gdat.typeverb > 0:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
if np.where(minm > gmodstat.dictelem[l][strgfeat])[0].size > 0 or np.where(maxm < gmodstat.dictelem[l][strgfeat])[0].size > 0:
print('Warning: element parameter outside the plot limits.')
print('l')
print(l)
print('Feature: ')
print(strgfeat)
print('Plot minimum')
print(minm)
print('Plot maximum')
print(maxm)
if strgfeat == gmod.nameparagenrelemampl[l] and strgfeat in gmod.namepara.genrelem[l]:
gmod.indxparagenrelemtemp = gmod.namepara.genrelem[l].index(strgfeat)
if (gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp] != 'gaus' and not gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp].startswith('lnor')):
raise Exception('')
stopchro(gdat, gdatmodi, 'tert')
def retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, paragenrscalfull, dictelem, numbelem):
gmod = getattr(gdat, strgmodl)
if strgpdfn == 'self':
minmfeat = getattr(gmod.minmpara, strgfeat)
maxmfeat = getattr(gmod.maxmpara, strgfeat)
lpri = numbelem[l] * np.log(1. / (maxmfeat - minmfeat))
if strgpdfn == 'logt':
lpri = retr_lprilogtdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'gaus':
lpri = retr_lprigausdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'dexp':
maxmbgal = getattr(gmod, 'maxmbgal')
gmod.indxpara.bgaldistscal = getattr(gmod.indxpara, 'bgaldistscalpop%d' % l)
lpri = np.sum(np.log(pdfn_dexp(dictelem[l]['bgal'], maxmbgal, paragenrscalfull[gmod.indxpara.bgaldistscal])))
if strgpdfn == 'expo':
maxmgang = getattr(gmod, 'maxmgang')
gang = retr_gang(dictelem[l]['lgal'], dictelem[l]['bgal'])
gmod.indxpara.gangdistscal = getattr(gmod.indxpara, 'gangdistscalpop%d' % l)
lpri = np.sum(np.log(pdfn_expo(gang, maxmgang, paragenrscalfull[gmod.indxpara.gangdistscal])))
lpri = -numbelem[l] * np.log(2. * np.pi)
if strgpdfn == 'tmpl':
lpri = np.sum(lpdfspatprioobjt(dictelem[l]['bgal'], dictelem[l]['lgal'], grid=False))
if strgpdfn == 'powr':
lpri = retr_lpripowrdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'dpowslopbrek':
lpri = retr_lpridpowdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
if strgpdfn == 'dsrcexpo':
lpri += -np.sum(np.sqrt((dictelem[l]['lgal'] - lgalsour)**2 + (dictelem[l]['bgal'] - bgalsour)**2) / \
getattr(gmod, 'dsrcdistsexppop%d' % l))
if strgpdfn == 'tmpl':
if strgpdfn.endswith('cons'):
pdfnspatpriotemp = getattr(gmod, 'pdfnspatpriotemp')
spatdistcons = paragenrscalfull[getattr(gmod.indxpara, 'spatdistcons')]
lpdfspatprio, lpdfspatprioobjt = retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons)
lpdfspatpriointp = lpdfspatprioobjt(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart)
lpdfspatpriointp = lpdfspatpriointp.T
setattr(gmodstat, 'lpdfspatpriointp', lpdfspatpriointp)
setattr(gmodstat, 'lpdfspatprioobjt', lpdfspatprioobjt)
else:
lpdfspatprioobjt = gmod.lpdfspatprioobjt
return lpri
def checstrgfeat(strgfrst, strgseco):
numbfrst = len(strgfrst)
numbseco = len(strgseco)
numb = min(numbfrst, numbseco)
if strgfrst[:numb] < strgseco[:numb]:
booltemp = True
elif strgfrst[:numb] == strgseco[:numb]:
if numbfrst >= numbseco:
booltemp = False
else:
booltemp = True
else:
booltemp = False
return booltemp
def retr_pathoutprtag(pathpcat, rtag):
pathoutprtag = pathpcat + '/data/outp/' + rtag + '/'
return pathoutprtag
def proc_finl(gdat=None, rtag=None, strgpdfn='post', listnamevarbproc=None, forcplot=False):
gdatmock = None
print('proc_finl()')
if rtag is None:
rtag = gdat.rtag
# determine whether the final-processing is nominal or tiling
if isinstance(rtag, list):
listrtagmodi = rtag
rtagfinl = tdpy.retr_strgtimestmp() + rtag[0][15:] + 'tile'
booltile = True
else:
listrtagmodi = [rtag]
rtagfinl = rtag
booltile = False
# determine whether the gdatfinl object is available
boolgdatfinl = chec_statfile(pathpcat, rtagfinl, 'gdatfinlpost')
boolgdatfinlgood = False
if boolgdatfinl:
print('Final-processing has been performed previously.')
pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
path = pathoutprtag + 'gdatfinl' + strgpdfn
try:
gdat = readfile(path)
boolgdatfinlgood = True
except:
print('gdatfinl object is corrupted.')
if boolgdatfinl and boolgdatfinlgood:
# read gdatfinl
pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
path = pathoutprtag + 'gdatfinl' + strgpdfn
gdatfinl = readfile(path)
if gdatfinl.fitt.numbparaelem > 0:
if gdatfinl.typedata == 'inpt':
if gdatfinl.boolcrex or gdatfinl.boolcrin:
if gdatfinl.rtagmock is not None:
path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'
gdatmock = readfile(path)
else:
if booltile:
gdatfinltile = tdpy.gdatstrt()
indxrtaggood = []
liststrgtile = []
listrtaggood = []
indxtiletemp = 0
for n, rtagmodi in enumerate(listrtagmodi):
# read gdatinit
boolgdatinit = chec_statfile(pathpcat, rtagmodi, 'gdatinit')
if not boolgdatinit:
if booltile:
print('Initial global object not found. Skipping...')
continue
else:
print('Initial global object not found. Quitting...')
return
pathoutprtag = retr_pathoutprtag(pathpcat, rtagmodi)
path = pathoutprtag + 'gdatinit'
gdatinit = readfile(path)
if booltile:
gdatfinltile = gdatinit
gdatfinl = gdatinit
else:
gdatfinl = gdatinit
pathoutprtagmodi = retr_pathoutprtag(pathpcat, rtagmodi)
listgdatmodi = []
for k in gdatinit.indxproc:
path = pathoutprtagmodi + 'gdatmodi%04d' % k + strgpdfn
listgdatmodi.append(readfile(path))
# erase
gdatdictcopy = deepcopy(gdatinit.__dict__)
for strg, valu in gdatdictcopy.items():
if strg.startswith('fitt.indxpara.'):
delattr(gdatinit, strg)
if gdatinit.boolmockonly:
print('Mock only run. Quitting final-processing...')
return
# read gdatmodi
print('rtagmodi')
print(rtagmodi)
boolgdatmodi = chec_statfile(pathpcat, rtagmodi, 'gdatmodipost')
if not boolgdatmodi:
print('Modified global object not found. Quitting final-processing...')
return
## list of other parameters to be flattened
gdatinit.liststrgvarbarryflat = deepcopy(listgdatmodi[0].liststrgvarbarry)
# temp
#for strg in ['memoresi']:
# gdatinit.liststrgvarbarryflat.remove(strg)
listparagenrscalfull = np.empty((gdatinit.numbsamptotl, gdatinit.fitt.maxmnumbpara))
if booltile:
gdatfinltile.pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
numbsamptotlrsmp = gdatinit.numbsamptotl
indxsamptotlrsmp = np.random.choice(gdatinit.indxsamptotl, size=gdatinit.numbsamptotl, replace=False)
# aggregate samples from the chains
if gdatinit.typeverb > 0:
print('Reading gdatmodi objects from all processes...')
timeinit = gdatinit.functime()
if gdatinit.typeverb > 0:
timefinl = gdatinit.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
if gdatinit.fitt.numbparaelem > 0:
if len(getattr(listgdatmodi[0], 'list' + strgpdfn + 'indxelemfull')) == 0:
print('Found an empty element list. Skipping...')
continue
if gdatinit.typeverb > 0:
print('Accumulating arrays...')
timeinit = gdatinit.functime()
for strgvarb in gdatinit.liststrgvarbarryflat:
for k in gdatinit.indxproc:
if k == 0:
shap = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb).shape
shap = [shap[0], gdatinit.numbproc] + list(shap[1:])
temp = np.zeros(shap) - 1
if len(shap) > 2:
temp[:, k, :] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)
else:
temp[:, k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, temp)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
if gdatfinl.typeverb > 0:
print('Accumulating lists...')
timeinit = gdatfinl.functime()
# lists of lists collected at each sample
for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
listtemp = [[[] for k in gdatfinl.indxproc] for j in gdatfinl.indxsamp]
for j in gdatfinl.indxsamp:
for k in gdatfinl.indxproc:
listtemp[j][k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)[j]
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
if not booltile:
## maximum likelihood sample
gdatfinl.maxmllikproc = np.empty(gdatfinl.numbproc)
gdatfinl.indxswepmaxmllikproc = np.empty(gdatfinl.numbproc, dtype=int)
gdatfinl.sampmaxmllikproc = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))
for k in gdatfinl.indxproc:
gdatfinl.maxmllikproc[k] = listgdatmodi[k].maxmllikswep
gdatfinl.indxswepmaxmllikproc[k] = listgdatmodi[k].indxswepmaxmllik
gdatfinl.sampmaxmllikproc[k, :] = listgdatmodi[k].sampmaxmllik
listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')
listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')
# Gelman-Rubin test
if gdatfinl.numbproc > 1:
if gdatfinl.typeverb > 0:
print('Computing the Gelman-Rubin TS...')
timeinit = gdatfinl.functime()
gdatfinl.gmrbparagenrscalbase = np.zeros(gdatfinl.fitt.numbparagenrbase)
gdatfinl.gmrbstat = np.zeros((gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))
for k in gdatfinl.fitt.indxparagenrbase:
gdatfinl.gmrbparagenrscalbase[k] = tdpy.mcmc.gmrb_test(listparagenrscalfull[:, :, k])
if not np.isfinite(gdatfinl.gmrbparagenrscalbase[k]):
gdatfinl.gmrbparagenrscalbase[k] = 0.
listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')
for i in gdatfinl.indxener:
for j in gdatfinl.indxpixl:
for m in gdatfinl.indxevtt:
gdatfinl.gmrbstat[i, j, m] = tdpy.mcmc.gmrb_test(listcntpmodl[:, :, i, j, m])
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
# calculate the autocorrelation of the chains
if gdatfinl.typeverb > 0:
print('Computing the autocorrelation of the chains...')
timeinit = gdatfinl.functime()
gdatfinl.atcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt, int(gdatfinl.numbparagenrfull / 2)))
gdatfinl.timeatcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))
gdatfinl.atcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara, int(gdatfinl.numbparagenrfull / 2)))
gdatfinl.timeatcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))
for k in gdatfinl.indxproc:
gdatfinl.atcrpara[k, :, :], gdatfinl.timeatcrpara[k, :] = tdpy.mcmc.retr_timeatcr(listparagenrscalfull[:, k, :], typeverb=gdatfinl.typeverb)
listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')
gdatfinl.atcrcntp[k, :], gdatfinl.timeatcrcntp[k, :] = tdpy.mcmc.retr_timeatcr(listcntpmodl[:, k, :, :, :], typeverb=gdatfinl.typeverb)
timeatcrcntpmaxm = np.amax(gdatfinl.timeatcrcntp)
gdatfinl.timeatcrcntpmaxm = np.amax(timeatcrcntpmaxm)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
setattr(gdatfinl, 'list' + strgpdfn + 'sampproc', np.copy(getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')))
# flatten the list chains from different walkers
for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
listtemp = []
listinpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
for j in gdatfinl.indxsamp:
for k in gdatfinl.indxproc:
listtemp.append(listinpt[j][k])
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)
# flatten the array chains from different walkers
for strgvarb in gdatinit.liststrgvarbarryflat:
inpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
shap = [inpt.shape[0] * inpt.shape[1]] + list(inpt.shape[2:])
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, inpt.reshape(shap))
listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')
listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')
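# bookkeeping for the tile that has just been processed: record its tile string, run tag and index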
if booltile:
liststrgtile.append(rtagmodi.split('_')[-2][-4:])
listrtaggood.append(rtagmodi)
indxrtaggood.append(n)
indxtiletemp += 1
if len(liststrgtile) == 1:
for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
refrfeattile = [[] for q in gdatfinl.indxrefr]
setattr(gdatfinl, 'refr' + strgfeat, refrfeattile)
for strgvarb in gdatfinl.liststrgvarbarrysamp:
if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
listvarb = []
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarb)
else:
hist = np.zeros_like(getattr(listgdatmodi[0], 'list' + strgpdfn + strgvarb))
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, hist)
for name, valu in gdatfinl.__dict__.items():
if name.startswith('refrhist'):
setattr(gdatfinl, name, np.zeros_like(getattr(gdatfinl, name)))
#for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
# refrfeattile = getattr(gdatfinl, 'refr' + strgfeat)
# #refrfeat = getattr(gdatfinl, 'refr' + strgfeat)
# refrfeat = [[] for q in gdatfinl.indxrefr]
# for q in gdatfinl.indxrefr:
# if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:
# refrfeat[q].append(refrfeattile[q])
for strgvarb in gdatfinl.liststrgvarbarrysamp:
if strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
# temp
if 'spec' in strgvarb:
continue
hist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
hist += getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
for name, valu in gdatfinl.__dict__.items():
if name.startswith('refrhist'):
hist = getattr(gdatfinl, name)
hist += getattr(gdatfinl, name)
print('Done with the tile number %d, run number %d...' % (indxtiletemp, n))
if booltile:
gdatfinl.pathplotrtag = gdatfinl.pathimag + rtagfinl + '/'
make_fold(gdatfinl)
indxrtaggood = np.array(indxrtaggood).astype(int)
numbrtaggood = indxrtaggood.size
numbtile = numbrtaggood
print('Found %d tiles with run tags:' % numbrtaggood)
for indxrtaggoodtemp in indxrtaggood:
print(rtag[indxrtaggoodtemp])
# concatenate reference elements from different tiles
#for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
# refrfeat = getattr(gdatfinl, 'refr' + strgfeat, refrfeat)
# for q in gdatfinl.indxrefr:
# if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:
# refrfeat[q] = np.concatenate(refrfeat[q], axis=1)
for strgvarb in gdatfinl.liststrgvarbarrysamp:
if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
listvarb = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
if 'assc' in strgvarb:
numbrefrelemtotl = 0
for k, varbrsmp in enumerate(listvarb):
numbrefrelemtotl += varbrsmp.shape[1]
shap = [gdatfinl.numbsamptotl, numbrefrelemtotl]
listvarbtemp = np.empty(shap)
cntr = 0
for k, varb in enumerate(listvarb):
listvarbtemp[:, cntr:cntr+varb.shape[1]] = varb
cntr += varb.shape[1]
else:
shap = [gdatfinl.numbsamptotl * numbtile] + list(listvarb[0].shape[1:])
listvarbtemp = np.empty(shap)
for k, varb in enumerate(listvarb):
listvarbtemp[k*gdatfinl.numbsamptotl:(k+1)*gdatfinl.numbsamptotl, ...] = varb
setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarbtemp)
else:
# maximum likelihood sample
if gdatfinl.fitt.numbparaelem > 0:
listindxelemfull = getattr(gdatfinl, 'list' + strgpdfn + 'indxelemfull')
listllik = getattr(gdatfinl, 'list' + strgpdfn + 'llik')
listlliktotl = getattr(gdatfinl, 'list' + strgpdfn + 'lliktotl')
indxsamptotlmlik = np.argmax(np.sum(np.sum(np.sum(listllik, 3), 2), 1))
# copy the maximum likelihood sample
for strgvarb in listgdatmodi[0].liststrgvarbarrysamp:
setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik, ...])
for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik])
# temp -- don't gdatfinl.listllik and gdatfinl.listparagenrscalfull have the same dimensions?
gdatfinl.mlikparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')[indxsamptotlmlik, :]
#if gdatfinl.fitt.numbparaelem > 0:
# gdatfinl.mlikindxelemfull = listindxelemfull[indxsamptotlmlik]
gdatfinl.mlikparagenrscalbase = gdatfinl.mlikparagenrscalfull[gdatfinl.fitt.indxparagenrbase]
for k, gmod.nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):
setattr(gdatfinl, 'mlik' + gmod.nameparagenrbase, gdatfinl.mlikparagenrscalbase[k])
# add execution times to the chain output
gdatfinl.timereal = np.zeros(gdatfinl.numbproc)
gdatfinl.timeproc = np.zeros(gdatfinl.numbproc)
for k in gdatfinl.indxproc:
gdatfinl.timereal[k] = listgdatmodi[k].timereal
gdatfinl.timeproc[k] = listgdatmodi[k].timeproc
# find the maximum likelihood and posterior over the chains
gdatfinl.indxprocmaxmllik = np.argmax(gdatfinl.maxmllikproc)
#gdatfinl.maxmlliktotl = gdatfinl.maxmllikproc[gdatfinl.indxprocmaxmllik]
gdatfinl.indxswepmaxmllik = gdatfinl.indxprocmaxmllik * gdatfinl.numbparagenrfull + gdatfinl.indxswepmaxmllikproc[gdatfinl.indxprocmaxmllik]
gdatfinl.sampmaxmllik = gdatfinl.sampmaxmllikproc[gdatfinl.indxprocmaxmllik, :]
if strgpdfn == 'post':
levipost = retr_levipost(listlliktotl)
setattr(gdatfinl, strgpdfn + 'levipost', levipost)
if strgpdfn == 'prio':
leviprio = np.log(np.mean(np.exp(listlliktotl)))
setattr(gdatfinl, strgpdfn + 'leviprio', leviprio)
# parse the sample vector
listparagenrscalbase = listparagenrscalfull[:, gdatfinl.fitt.indxparagenrbase]
for k, gmod.nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):
setattr(gdatfinl, 'list' + strgpdfn + gmod.nameparagenrbase, listparagenrscalbase[:, k])
setattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalbase', listparagenrscalbase)
if strgpdfn == 'post' and gdatfinl.checprio:
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
path = pathoutprtag + 'gdatfinlprio'
try:
gdatprio = readfile(path)
except:
proc_finl(gdat=gdatfinl, strgpdfn='prio', listnamevarbproc=listnamevarbproc, forcplot=forcplot)
else:
gdatprio = None
# post process samples
## bin element parameters
if gdatfinl.typeverb > 0:
print('Binning the probabilistic catalog spatially...')
timeinit = gdatfinl.functime()
if not booltile:
if gdatfinl.fitt.numbparaelem > 0:
if gdatfinl.boolbinsspat:
histlgalbgalelemstkd = [[] for l in gdatfinl.fittindxpopl]
listlgal = getattr(gdatfinl, 'list' + strgpdfn + 'lgal')
listbgal = getattr(gdatfinl, 'list' + strgpdfn + 'bgal')
for l in gdatfinl.fittindxpopl:
if gdatfinl.fitttypeelem[l] != 'lghtline':
histlgalbgalelemstkd[l] = np.zeros((gdatfinl.numbbgalpntsprob, gdatfinl.numblgalpntsprob, gdatfinl.numbbinsplot, numb))
temparry = np.concatenate([listlgal[n][l] for n in gdatfinl.indxsamptotl])
temp = np.empty((len(temparry), 3))
temp[:, 0] = temparry
temp[:, 1] = np.concatenate([listbgal[n][l] for n in gdatfinl.indxsamptotl])
temp[:, 2] = np.concatenate([getattr(gdatfinl, 'list' + strgpdfn + strgfeat)[n][l] for n in gdatfinl.indxsamptotl])
bins = getattr(gdatfinl, 'bins' + strgfeat)
histlgalbgalelemstkd[l][:, :, :, k] = np.histogramdd(temp, \
bins=(gdatfinl.binslgalpntsprob, gdatfinl.binsbgalpntsprob, bins))[0]
setattr(gdatfinl, strgpdfn + 'histlgalbgalelemstkd', histlgalbgalelemstkd)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
## construct a condensed catalog of elements
if gdatfinl.boolcondcatl and gdatfinl.fitt.numbparaelem > 0:
if gdatfinl.typeverb > 0:
print('Constructing a condensed catalog...')
timeinit = gdatfinl.functime()
retr_condcatl(gdatfinl)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
# construct lists of samples for each proposal type
listindxproptype = getattr(gdatfinl, 'list' + strgpdfn + 'indxproptype')
listboolpropaccp = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropaccp')
listboolpropfilt = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropfilt')
listindxsamptotlproptotl = []
listindxsamptotlpropfilt = []
listindxsamptotlpropaccp = []
listindxsamptotlpropreje = []
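# for each proposal type, collect the indices of total, accepted, filtered and rejected samples and compute the acceptance fraction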
for n in gdatfinl.indxproptype:
indxsampproptype = np.where(listindxproptype == gdatfinl.indxproptype[n])[0]
listindxsamptotlproptotl.append(indxsampproptype)
listindxsamptotlpropaccp.append(np.intersect1d(indxsampproptype, np.where(listboolpropaccp)[0]))
listindxsamptotlpropfilt.append(np.intersect1d(indxsampproptype, np.where(listboolpropfilt)[0]))
listindxsamptotlpropreje.append(np.intersect1d(indxsampproptype, np.where(np.logical_not(listboolpropaccp))[0]))
if listindxsamptotlproptotl[n].size == 0:
accp = 0.
else:
accp = float(listindxsamptotlpropaccp[n].size) / listindxsamptotlproptotl[n].size
setattr(gdatfinl, 'accp' + gdatfinl.nameproptype[n], accp)
setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlproptotl', listindxsamptotlproptotl)
setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropaccp', listindxsamptotlpropaccp)
setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropreje', listindxsamptotlpropreje)
if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':
if gdatfinl.typedata == 'inpt':
if gdatfinl.boolcrex or gdatfinl.boolcrin:
if gdatfinl.rtagmock is not None:
path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'
gdatmock = readfile(path)
# posterior corrections
if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':
## perform corrections
if gdatfinl.typedata == 'inpt':
if gdatfinl.boolcrex or gdatfinl.boolcrin:
for gmod.namepara.genrelemvarbhist in gdatfinl.liststrgvarbhist:
strgvarb = gmod.namepara.genrelemvarbhist[0]
if gmod.namepara.genrelemvarbhist[1].startswith('aerr') or len(gmod.namepara.genrelemvarbhist[2]) > 0 and gmod.namepara.genrelemvarbhist[2].startswith('aerr'):
continue
if gmod.namepara.genrelemvarbhist[1] == 'spec' or gmod.namepara.genrelemvarbhist[1] == 'deflprof' or gmod.namepara.genrelemvarbhist[1] == 'specplot':
continue
if len(gmod.namepara.genrelemvarbhist[2]) > 0 and (gmod.namepara.genrelemvarbhist[2] == 'spec' or \
gmod.namepara.genrelemvarbhist[2] == 'deflprof' or gmod.namepara.genrelemvarbhist[2] == 'specplot'):
continue
## internal correction
listhist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
for qq in gdatmock.indxrefr:
l = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[1][0])
qq = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[2][0])
if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and \
(len(gmod.namepara.genrelemvarbhist[2]) == 0 or gmod.namepara.genrelemvarbhist[2][-4:] in gdatfinl.listnamerefr):
listhistincr = listhist
else:
if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and len(gmod.namepara.genrelemvarbhist[2]) > 0:
listcmpltrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (l, qq))], 2)
listfdistrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (qq, l))], 2)
elif len(gmod.namepara.genrelemvarbhist[2][:-4]) > 0 and gmod.namepara.genrelemvarbhist[2][-4:] in gdatfinl.listnamerefr:
listcmpltrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (l, qq))], 1)
listfdistrue = np.stack(gdatfinl.numbbinsplot * \
[getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (qq, l))], 1)
else:
listcmpltrue = getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[3][qq])
listfdistrue = getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[3][qq])
if len(gmod.namepara.genrelemvarbhist[2]) == 0:
listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
for k in gdatfinl.indxbinsplot:
listcmplboot[:, k] = np.random.choice(listcmpltrue[:, k], size=gdatfinl.numbsampboot)
listfdisboot[:, k] = np.random.choice(listfdistrue[:, k], size=gdatfinl.numbsampboot)
listhistboot[:, k] = np.random.choice(listhist[:, k], size=gdatfinl.numbsampboot)
else:
listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
for a in gdatfinl.indxbinsplot:
for b in gdatfinl.indxbinsplot:
listcmplboot[:, a, b] = np.random.choice(listcmpltrue[:, a, b], size=gdatfinl.numbsampboot)
listfdisboot[:, a, b] = np.random.choice(listfdistrue[:, a, b], size=gdatfinl.numbsampboot)
listhistboot[:, a, b] = np.random.choice(listhist[:, a, b], size=gdatfinl.numbsampboot)
indxbadd = np.where(listcmplboot == -1)
indxbaddzero = np.where(listcmplboot == 0.)
listhistincr = listhistboot / listcmplboot * (1. - listfdisboot)
listhistincr[indxbadd] = -1.5
listhistincr[indxbaddzero] = 1.5
listgdatmodi[0].liststrgchan += ['incr' + gmod.namepara.genrelemvarbhist[4][qq]]
setattr(gdatfinl, 'listpostincr' + gmod.namepara.genrelemvarbhist[4][qq], listhistincr)
## external correction
for q in gdatfinl.indxrefr:
nametemp = gmod.namepara.genrelemvarbhist[1]
if len(gmod.namepara.genrelemvarbhist[2]) > 0:
nametemp += gmod.namepara.genrelemvarbhist[2]
nametemp += 'pop%dpop%dpop%d' % (q, qq, l)
crexhist = getattr(gdatfinl, 'crex' + nametemp)
if crexhist is not None:
listhistexcr = listhistincr * crexhist
if crexhist.ndim == 1 and listhistincr.ndim == 3:
raise Exception('')
listgdatmodi[0].liststrgchan += ['excr' + nametemp]
setattr(gdatfinl, 'listpostexcr' + nametemp, listhistexcr)
# compute credible intervals
if gdatfinl.typeverb > 0:
print('Computing credible intervals...')
timeinit = gdatfinl.functime()
for strgchan in listgdatmodi[0].liststrgchan:
if booltile:
if strgchan in gdatfinl.liststrgvarbarryswep or strgchan in listgdatmodi[0].liststrgvarblistsamp:
continue
if not (strgchan.startswith('hist') or strgchan.startswith('incr') or strgchan.startswith('excr')):
continue
if gdatfinl.fitt.numbparaelem > 0 and strgchan in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
if 'spec' in strgchan:
continue
if strgchan == 'spec':
continue
listtemp = getattr(gdatfinl, 'list' + strgpdfn + strgchan)
if isinstance(listtemp, list):
if booltile:
continue
# ensure that transdimensional lists are not included
# temp
if strgchan in gdatfinl.fitt.namepara.genrelemtotl or strgchan == 'indxelemfull':
continue
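# accumulate percentiles, posterior mean, median, error and standard deviation for every entry of the list-valued chain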
pctltemp = []
pmeatemp = []
meditemp = []
errrtemp = []
stdvtemp = []
numb = len(listtemp[0])
for k in range(numb):
if isinstance(listtemp[0][k], list):
continue
shap = [gdatfinl.numbsamptotl] + list(listtemp[0][k].shape)
temp = np.zeros(shap)
for n in gdatfinl.indxsamptotl:
temp[n, ...] = listtemp[n][k]
pctltempsing = tdpy.retr_pctlvarb(temp)
pmeatempsing = np.mean(temp, axis=0)
meditempsing = pctltempsing[0, ...]
errrtempsing = tdpy.retr_errrvarb(pctltempsing)
stdvtempsing = np.std(temp)
pctltemp.append(pctltempsing)
pmeatemp.append(pmeatempsing)
meditemp.append(meditempsing)
errrtemp.append(errrtempsing)
stdvtemp.append(stdvtempsing)
else:
# this is needed for finding posterior moments of features of associated reference elements
if 'asscref' in strgchan:
if listtemp.ndim != 2:
raise Exception('')
pmeatemp = np.zeros(listtemp.shape[1])
pctltemp = np.zeros([3] + [listtemp.shape[1]])
# temp -- this only works for 2D listtemp
for k in range(listtemp.shape[1]):
indxassc = np.where(np.isfinite(listtemp[:, k]))[0]
if indxassc.size > 0:
pctltemp[:, k] = tdpy.retr_pctlvarb(listtemp[indxassc, k])
pmeatemp[k] = np.mean(listtemp[indxassc, k])
else:
pctltemp = tdpy.retr_pctlvarb(listtemp)
pmeatemp = np.mean(listtemp, axis=0)
errrtemp = tdpy.retr_errrvarb(pctltemp)
stdvtemp = np.std(pctltemp, axis=0)
meditemp = pctltemp[0, ...]
if strgchan in gdatfinl.listnamevarbcpct:
cpcttemp = np.empty([gdatfinl.numbsampcpct] + [3] + list(listtemp.shape[1:]))
for n in gdatfinl.indxsampcpct:
cpcttemp[n, ...] = tdpy.retr_pctlvarb(listtemp[:n+1, ...])
setattr(gdatfinl, 'pctl' + strgpdfn + strgchan, pctltemp)
setattr(gdatfinl, 'medi' + strgpdfn + strgchan, meditemp)
setattr(gdatfinl, 'pmea' + strgpdfn + strgchan, pmeatemp)
setattr(gdatfinl, 'errr' + strgpdfn + strgchan, errrtemp)
setattr(gdatfinl, 'stdv' + strgpdfn + strgchan, stdvtemp)
if strgchan in gdatfinl.listnamevarbcpct:
setattr(gdatfinl, 'cpct' + strgpdfn + strgchan, cpcttemp)
if not booltile:
pmealliktotl = getattr(gdatfinl, 'pmea' + strgpdfn + 'lliktotl')
stdvlliktotl = getattr(gdatfinl, 'stdv' + strgpdfn + 'lliktotl')
minmlliktotl = np.amin(listlliktotl)
maxmlliktotl = np.amax(listlliktotl)
skewlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**3)
kurtlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**4)
setattr(gdatfinl, 'minm' + strgpdfn + 'lliktotl', minmlliktotl)
setattr(gdatfinl, 'maxm' + strgpdfn + 'lliktotl', maxmlliktotl)
setattr(gdatfinl, 'skew' + strgpdfn + 'lliktotl', skewlliktotl)
setattr(gdatfinl, 'kurt' + strgpdfn + 'lliktotl', kurtlliktotl)
if strgpdfn == 'post':
infopost = retr_infofromlevi(pmealliktotl, levipost)
setattr(gdatfinl, strgpdfn + 'infopost', infopost)
if strgpdfn == 'post' and gdatfinl.checprio:
leviprio = getattr(gdatprio, 'prioleviprio')
infoprio = retr_infofromlevi(pmealliktotl, leviprio)
setattr(gdatfinl, strgpdfn + 'infoprio', infoprio)
bcom = maxmlliktotl - pmealliktotl
setattr(gdatfinl, strgpdfn + 'bcom', bcom)
listnametemp = ['lliktotl']
if gmod.numbparaelem > 0:
listnametemp += ['lpripena']
for namevarbscal in listnametemp:
listtemp = getattr(gdatfinl, 'list' + strgpdfn + namevarbscal)
minm = np.amin(listtemp)
maxm = np.amax(listtemp)
setattr(gdatfinl, 'minm' + namevarbscal, minm)
setattr(gdatfinl, 'maxm' + namevarbscal, maxm)
setattr(gdatfinl, 'scal' + namevarbscal, 'self')
retr_axis(gdat, namevarbscal)
if gdatfinl.checprio:
for strgvarb in gdatfinl.listnamevarbscal:
setp_pdfnvarb(gdatfinl, strgpdfn, strgvarb, strgvarb)
for l0 in gdatfinl.fittindxpopl:
for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
continue
setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)
for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
continue
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)
# calculate information gain
if strgpdfn == 'post':
for namevarbscal in gdatfinl.listnamevarbscal:
setp_info(gdatfinl, gdatprio, namevarbscal, namevarbscal)
for l0 in gdatfinl.fittindxpopl:
for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
continue
setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)
for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:
if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
continue
if not checstrgfeat(strgfeatfrst, strgfeatseco):
continue
setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)
if gdatfinl.typeverb > 0:
timefinl = gdatfinl.functime()
print('Done in %.3g seconds.' % (timefinl - timeinit))
# flatten the arrays which have been collected at each sweep
#setattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'flat', getattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'totl').flatten())
if not booltile:
# memory usage
listmemoresi = getattr(gdatfinl, 'list' + strgpdfn + 'memoresi')
gdatfinl.meanmemoresi = np.mean(listmemoresi, 1)
gdatfinl.derimemoresi = (gdatfinl.meanmemoresi[-1] - gdatfinl.meanmemoresi[0]) / gdatfinl.numbswep
gdatfinl.timerealtotl = time.time() - gdatfinl.timerealtotl
gdatfinl.timeproctotl = time.clock() - gdatfinl.timeproctotl
gdatfinl.timeproctotlswep = gdatfinl.timeproctotl / gdatfinl.numbswep
if gdatfinl.timeatcrcntpmaxm == 0.:
gdatfinl.timeprocnorm = 0.
else:
gdatfinl.timeprocnorm = gdatfinl.timeproctotlswep / gdatfinl.timeatcrcntpmaxm
# write the final gdat object
path = gdatfinl.pathoutprtag + 'gdatfinl' + strgpdfn
if gdatfinl.typeverb > 0:
print('Writing gdatfinl to %s...' % path)
writfile(gdatfinl, path)
filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')
filestat.write('gdatfinl%s written.\n' % strgpdfn)
filestat.close()
if not booltile:
if gdatfinl.typeverb > 0:
for k in gdatfinl.indxproc:
print('Process %d has been completed in %d real seconds, %d CPU seconds.' % (k, gdatfinl.timereal[k], gdatfinl.timeproc[k]))
print('Parent process has run in %d real seconds, %d CPU seconds.' % (gdatfinl.timerealtotl, gdatfinl.timeproctotl))
print('HACKING!!')
gdatfinl.strgpdfn = 'post'
print('Checking whether post-processing plots already exist.')
booltemp = chec_statfile(pathpcat, rtagfinl, 'plotfinl')
if booltemp:
print('Final plots already exist. Skipping...')
else:
if strgpdfn == 'post' and gdatfinl.checprio:
path = pathoutprtag + 'gdatfinlprio'
gdatprio = readfile(path)
else:
gdatprio = None
if gdatfinl.makeplot and getattr(gdatfinl, 'makeplotfinl' + strgpdfn) or forcplot:
plot_finl(gdatfinl, gdatprio=gdatprio, strgpdfn=strgpdfn, gdatmock=gdatmock, booltile=booltile)
filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')
filestat.write('plotfinl%s written.\n' % strgpdfn)
filestat.close()
def retr_listgdat(listrtag, typegdat='finlpost'):
listgdat = []
for rtag in listrtag:
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
path = pathoutprtag + 'gdat%s' % typegdat
listgdat.append(readfile(path))
return listgdat
def make_fold(gdat):
for strgpdfn in gdat.liststrgpdfn:
setattr(gdat, 'path' + strgpdfn, gdat.pathplotrtag + strgpdfn + '/')
path = getattr(gdat, 'path' + strgpdfn)
for nameseco in ['finl', 'fram', 'anim', 'opti']:
setattr(gdat, 'path' + strgpdfn + nameseco, path + nameseco + '/')
for nameseco in ['diag', 'lpac', 'varbscal', 'cond', 'varbscalproc']:
setattr(gdat, 'path' + strgpdfn + 'finl' + nameseco, path + 'finl/' + nameseco + '/')
for n in gdat.indxproptype:
setattr(gdat, 'path' + strgpdfn + 'finl' + gdat.nameproptype[n], path + 'finl/lpac/' + gdat.nameproptype[n] + '/')
for namethrd in ['hist', 'trac', 'join', 'cova']:
setattr(gdat, 'path' + strgpdfn + 'finlvarbscal' + namethrd, path + 'finl/varbscal/' + namethrd + '/')
for strgphas in gdat.liststrgphas + ['init']:
liststrgfold = getattr(gdat, 'liststrgfold' + strgphas)
for nameseco in liststrgfold:
if strgphas == 'init':
if nameseco == 'assc' or nameseco.startswith('cmpl') or nameseco.startswith('fdis'):
continue
setattr(gdat, 'path' + strgphas + nameseco[:-1], gdat.pathplotrtag + 'init/' + nameseco)
else:
setattr(gdat, 'path' + strgpdfn + strgphas + nameseco[:-1], path + strgphas + '/' + nameseco)
gdat.pathinfo = gdat.pathplotrtag + 'info/'
## make the directories
for attr, valu in gdat.__dict__.items():
if attr.startswith('path'):
os.system('mkdir -p %s' % valu)
def make_cmapdivg(strgcolrloww, strgcolrhigh):
funccolr = mpl.colors.ColorConverter().to_rgb
colrloww = funccolr(strgcolrloww)
colrhigh = funccolr(strgcolrhigh)
cmap = make_cmap([colrloww, funccolr('white'), 0.5, funccolr('white'), colrhigh])
return cmap
def make_cmap(seq):
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mpl.colors.LinearSegmentedColormap('CustomMap', cdict)
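# histogram the sample chain of a given variable into an empirical PDF and attach it to the global object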
def setp_pdfnvarb(gdat, strgpdfn, name, namefull, nameseco=None):
# chain of samples for this variable
listvarb = getattr(gdat, 'list' + strgpdfn + namefull)
if listvarb.ndim == 1:
shaptemp = [gdat.numbbinspdfn, 1]
else:
shaptemp = [gdat.numbbinspdfn] + list(listvarb.shape[1:])
pdfn = np.empty(shaptemp)
if listvarb.ndim == 1:
binsvarb = getattr(gdat.binspara, name)
deltvarb = getattr(gdat, 'delt' + name)
pdfn[:, 0] = np.histogram(listvarb, bins=binsvarb)[0].astype(float)
pdfn[:, 0] /= np.sum(pdfn[:, 0])
pdfn[:, 0] /= deltvarb
else:
binsvarb = np.linspace(0, gmod.maxmpara.numbelemtotl, 51)
if listvarb.ndim == 2:
for k in range(listvarb.shape[1]):
pdfn[:, k] = np.histogram(listvarb[:, k], bins=binsvarb)[0].astype(float)
pdfn[:, k] /= np.sum(pdfn[:, k])
pdfn *= 50.
if listvarb.ndim == 3:
for k in range(listvarb.shape[1]):
for m in range(listvarb.shape[2]):
pdfn[:, k, m] = np.histogram(listvarb[:, k, m], bins=binsvarb)[0].astype(float)
pdfn[:, k, m] /= np.sum(pdfn[:, k, m])
pdfn *= 2500.
pdfn[np.where(pdfn < 1e-50)[0]] = 1e-50
setattr(gdat, 'pdfn' + strgpdfn + namefull, pdfn)
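# compare the posterior and prior chains of a variable: information density per bin, total information gain and a KS-test p-value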
def setp_info(gdat, gdatprio, name, namefull, nameseco=None, namesecofull=None):
listpost = getattr(gdat, 'listpost' + namefull)
listprio = getattr(gdatprio, 'listprio' + namefull)
pdfnpost = getattr(gdat, 'pdfnpost' + namefull)
pdfnprio = getattr(gdatprio, 'pdfnprio' + namefull)
if listpost.ndim == 3:
infodens = np.empty((gdat.numbbinspdfn, listpost.shape[1], listpost.shape[2]))
info = np.empty((listpost.shape[1], listpost.shape[2]))
pvks = np.empty((listpost.shape[1], listpost.shape[2]))
else:
if listpost.ndim == 1:
numbtemp = 1
else:
numbtemp = listpost.shape[1]
infodens = np.empty((gdat.numbbinspdfn, numbtemp))
info = np.empty(numbtemp)
pvks = np.empty(numbtemp)
if listpost.ndim == 1:
listpost = listpost[:, None]
listprio = listprio[:, None]
deltvarb = getattr(gdat, 'delt' + name)
else:
if listpost.ndim == 2:
deltvarb = 1. / 50
else:
deltvarb = 1. / 50**2
if listpost.ndim == 1 or listpost.ndim == 2:
for k in range(listpost.shape[1]):
infodens[:, k] = retr_infodens(pdfnpost[:, k], pdfnprio[:, k])
info[k] = np.sum(infodens[:, k] * deltvarb)
temp, pvks[k] = sp.stats.ks_2samp(listpost[:, k], listprio[:, k])
if listpost.ndim == 3:
for k in range(listpost.shape[1]):
for m in range(listpost.shape[2]):
infodens[:, k, m] = retr_infodens(pdfnpost[:, k, m], pdfnprio[:, k, m])
info[k, m] = np.sum(infodens[:, k, m] * deltvarb)
temp, pvks[k, m] = sp.stats.ks_2samp(listpost[:, k, m], listprio[:, k, m])
setattr(gdat, 'pvks' + namefull, pvks)
setattr(gdat, 'infodens' + namefull, infodens)
setattr(gdat, 'info' + namefull, info)
# check the state file
def chec_statfile(pathpcat, rtag, strggdat, typeverb=1):
print('Checking the state file %s for %s...' % (strggdat, rtag))
pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
# check the status file
if not os.path.isfile(pathoutprtag + 'stat.txt'):
if typeverb > 0:
print('pathoutprtag')
print(pathoutprtag)
print('stat.txt not found.')
return False
# check the global object
filestat = open(pathoutprtag + 'stat.txt', 'r')
booltemp = False
linesrch = strggdat + ' written.\n'
for line in filestat:
if line == linesrch:
booltemp = True
filestat.close()
if not booltemp:
if typeverb > 0:
print('bad %s status.' % (strggdat))
return False
else:
return True
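# convert heliocentric distance and Galactic longitude/latitude to galactocentric distance and angles, assuming the Sun lies 8.5 kpc from the Galactic center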
def retr_los3(dlos, lgal, bgal):
dglc = np.sqrt(8.5e3**2 + dlos**2 - 2. * dlos * 8.5e3 * np.cos(bgal) * np.cos(lgal))
thet = np.arccos(np.sin(bgal) * dlos / dglc)
phii = np.arcsin(np.sqrt(np.cos(bgal)**2 * dlos**2 + 8.5e3**2 - 2 * dlos * np.cos(bgal) * 8.5e3) / dglc)
return dglc, thet, phii
def retr_glc3(dglc, thet, phii):
xpos = dglc * np.sin(thet) * np.cos(phii)
ypos = dglc * np.sin(thet) * np.sin(phii)
zpos = dglc * np.cos(thet)
dlos = np.sqrt(zpos**2 + xpos**2 + (8.5e3 - ypos)**2)
lgal = np.arctan2(8.5e3 - ypos, xpos) - np.pi / 2
bgal = np.arcsin(zpos / dlos)
return dlos, lgal, bgal
def retr_lumipuls(geff, magf, per0):
# temp -- this is bolometric luminosity whereas dictelem[l]['flux'] is differential!
lumi = 9.6e33 * (geff / 0.2) * (magf / 10**8.5)**2 * (3e-3 / per0)*4
return lumi
def retr_lumi(gdat, flux, dlos, reds=None):
lumi = flux * 4. * np.pi * dlos**2 * gdat.prsccmtr**2 / gdat.ergsgevv
# temp
# redshift correction
if reds is not None:
lumi *= (1. + reds)**2
return lumi
def retr_flux(gdat, lumi, dlos, reds=None):
flux = lumi / 4. / np.pi / dlos**2 / gdat.prsccmtr**2 * gdat.ergsgevv
# temp
# redshift correction
if reds is not None:
pass
return flux
def retr_per1(per0, magf):
per1 = 3.3e-20 * (magf / 10**8.5)**2 * (3e-3 / per0)
return per1
def retr_dlosgalx(lgal, bgal, dglc):
# temp -- this is obviously wrong
dlos = 8.5e3 - dglc
return dlos
def retr_arryfromlist(listtemp):
shap = [len(listtemp)] + list(listtemp[0].shape)
arry = np.empty(shap)
for k in range(len(listtemp)):
arry[k, ...] = listtemp[k]
return arry
def proc_cntpdata(gdat):
# exclude voxels with vanishing exposure
## data counts
if gdat.typedata == 'inpt':
gdat.cntpdata = retr_cntp(gdat, gdat.sbrtdata)
# data variance
gdat.varidata = np.maximum(gdat.cntpdata, 1.)
# correct the likelihoods for the constant data dependent factorial
gdat.llikoffs = -sp.special.gammaln(gdat.cntpdata + 1)
## spatial average
gdat.sbrtdatamean, gdat.sbrtdatastdv = retr_spatmean(gdat, gdat.cntpdata, boolcntp=True)
# data count limits
minmcntpdata = np.amin(gdat.cntpdata)
maxmcntpdata = np.amax(gdat.cntpdata)
minm = minmcntpdata
maxm = maxmcntpdata
setp_varb(gdat, 'cntpdata', minm=minm, maxm=maxm, lablroot='$C_{D}$', scal='asnh', strgmodl='plot')
maxm = maxmcntpdata
minm = 1e-1 * minmcntpdata
for strgmodl in gdat.liststrgmodl:
gmod = getattr(gdat, strgmodl)
setp_varb(gdat, 'cntpmodl', minm=minm, maxm=maxm, strgmodl=strgmodl, scal='asnh')
print('gdat.labltickmajrpara.cntpmodl')
print(gdat.labltickmajrpara.cntpmodl)
# residual limits
maxm = np.ceil(maxmcntpdata * 0.1)
minm = -np.ceil(maxmcntpdata * 0.1)
setp_varb(gdat, 'cntpresi', minm=minm, maxm=maxm, lablroot='$C_{R}$', scal='asnh', strgmodl='plot')
# 1-point function of the data counts
for m in gdat.indxevtt:
if gdat.numbpixl > 1:
for i in gdat.indxener:
print('gdat.cntpdata[i, :, m]')
summgene(gdat.cntpdata[i, :, m])
print('gdat.binspara.cntpdata')
summgene(gdat.binspara.cntpdata)
histcntp = np.histogram(gdat.cntpdata[i, :, m], bins=gdat.binspara.cntpdata)[0]
setattr(gdat, 'histcntpdataen%02devt%d' % (i, m), histcntp)
else:
histcntp = np.histogram(gdat.cntpdata[:, 0, m], bins=gdat.binspara.cntpdata)[0]
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import os
import pandas as pd
import pandas.testing as tm
from fastparquet import ParquetFile
from fastparquet import write, parquet_thrift
from fastparquet import writer, encoding
from pandas.testing import assert_frame_equal
from pandas.api.types import CategoricalDtype
import pytest
from fastparquet.util import default_mkdirs
from .util import s3, tempdir, sql, TEST_DATA
from fastparquet import cencoding
def test_uvarint():
values = np.random.randint(0, 15000, size=100)
buf = np.zeros(30, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for v in values:
o.seek(0)
cencoding.encode_unsigned_varint(v, o)
o.seek(0)
out = cencoding.read_unsigned_var_int(o)
assert v == out
def test_bitpack():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
width = cencoding.width_from_max_int(values.max())
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
cencoding.encode_bitpacked(values, width, o)
o.seek(0)
head = cencoding.read_unsigned_var_int(o)
buf2 = np.zeros(300, dtype=np.int32)
out = cencoding.NumpyIO(buf2.view("uint8"))
cencoding.read_bitpacked(o, head, width, out)
assert (values == buf2[:len(values)]).all()
assert buf2[len(values):].sum() == 0 # zero padding
assert out.tell() // 8 - len(values) < 8
def test_length():
lengths = np.random.randint(0, 15000, size=100)
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for l in lengths:
o.seek(0)
o.write_int(l)
o.seek(0)
out = buf.view('int32')[0]
assert l == out
def test_rle_bp():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
buf = np.empty(len(values) + 5, dtype=np.int32)
out = cencoding.NumpyIO(buf.view('uint8'))
buf2 = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf2)
width = cencoding.width_from_max_int(values.max())
# without length
cencoding.encode_rle_bp(values, width, o)
l = o.tell()
o.seek(0)
cencoding.read_rle_bit_packed_hybrid(o, width, length=l, o=out)
assert (buf[:len(values)] == values).all()
def test_roundtrip_s3(s3):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data.loc[100, 'f'] = np.nan
data['cat'] = data.hello.astype('category')
noop = lambda x: True
myopen = s3.open
write(TEST_DATA+'/temp_parq', data, file_scheme='hive',
row_group_offsets=[0, 500], open_with=myopen, mkdirs=noop)
myopen = s3.open
pf = ParquetFile(TEST_DATA+'/temp_parq', open_with=myopen)
df = pf.to_pandas(categories=['cat', 'bcat'])
for col in data:
assert (df[col] == data[col])[~df[col].isnull()].all()
@pytest.mark.parametrize('scheme', ['simple', 'hive'])
@pytest.mark.parametrize('row_groups', [[0], [0, 500]])
@pytest.mark.parametrize('comp', ['SNAPPY', None, 'GZIP'])
def test_roundtrip(tempdir, scheme, row_groups, comp):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'u64': np.arange(1000, dtype=np.uint64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['a'] = np.array([b'a', b'b', b'c', b'd', b'e']*200, dtype="S1")
data['aa'] = data['a'].map(lambda x: 2*x).astype("S2")
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data['cat'] = data.hello.astype('category')
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme, row_group_offsets=row_groups,
compression=comp)
r = ParquetFile(fname)
assert r.fmd.num_rows == r.count() == 1000
df = r.to_pandas()
assert data.cat.dtype == 'category'
for col in r.columns:
assert (df[col] == data[col]).all()
# tests https://github.com/dask/fastparquet/issues/250
assert isinstance(data[col][0], type(df[col][0]))
def test_bad_coltype(tempdir):
df = pd.DataFrame({'0': [1, 2], (0, 1): [3, 4]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises((ValueError, TypeError)) as e:
write(fn, df)
assert "tuple" in str(e.value)
def test_bad_col(tempdir):
df = pd.DataFrame({'x': [1, 2]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises(ValueError) as e:
write(fn, df, has_nulls=['y'])
@pytest.mark.parametrize('scheme', ('simple', 'hive'))
def test_roundtrip_complex(tempdir, scheme,):
import datetime
data = pd.DataFrame({'ui32': np.arange(1000, dtype=np.uint32),
'i16': np.arange(1000, dtype=np.int16),
'ui8': np.array([1, 2, 3, 4]*250, dtype=np.uint8),
'f16': np.arange(1000, dtype=np.float16),
'dicts': [{'oi': 'you'}] * 1000,
't': [datetime.datetime.now()] * 1000,
'td': [datetime.timedelta(seconds=1)] * 1000,
'bool': np.random.choice([True, False], size=1000)
})
data.loc[100, 't'] = None
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme)
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (df[col] == data[col])[~data[col].isnull()].all()
@pytest.mark.parametrize('df', [
pd.util.testing.makeMixedDataFrame(),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='Europe/London')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='Europe/Berlin')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz='UTC')}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz=datetime.timezone.min)}),
pd.DataFrame({'x': pd.date_range('3/6/2012 00:00',
periods=10, freq='H', tz=datetime.timezone.max)})
])
def test_datetime_roundtrip(tempdir, df, capsys):
fname = os.path.join(tempdir, 'test.parquet')
w = False
if 'x' in df and 'Europe/' in str(df.x.dtype.tz):
with pytest.warns(UserWarning) as w:
write(fname, df)
else:
write(fname, df)
r = ParquetFile(fname)
if w:
assert any("UTC" in str(wm.message) for wm in w.list)
df2 = r.to_pandas()
pd.testing.assert_frame_equal(df, df2, check_categorical=False)
def test_nulls_roundtrip(tempdir):
fname = os.path.join(tempdir, 'temp.parq')
data = pd.DataFrame({'o': np.random.choice(['hello', 'world', None],
size=1000)})
data['cat'] = data['o'].astype('category')
writer.write(fname, data, has_nulls=['o', 'cat'])
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (df[col] == data[col])[~data[col].isnull()].all()
assert (data[col].isnull() == df[col].isnull()).all()
def test_decimal_roundtrip(tempdir):
import decimal
def decimal_convert(x):
return decimal.Decimal(x)
fname = os.path.join(tempdir, 'decitemp.parq')
data = pd.DataFrame({'f64': np.arange(10000000, 10001000, dtype=np.float64) / 100000,
'f16': np.arange(1000, dtype=np.float16) /10000
})
data['f64']=data['f64'].apply(decimal_convert)
data['f16']=data['f16'].apply(decimal_convert)
writer.write(fname, data)
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (data[col] == df[col]).all()
def test_make_definitions_with_nulls():
for _ in range(10):
out = np.empty(1000, dtype=np.int32)
o = cencoding.NumpyIO(out.view("uint8"))
data = pd.Series(np.random.choice([True, None],
size=np.random.randint(1, 1000)))
defs, d2 = writer.make_definitions(data, False)
buf = np.frombuffer(defs, dtype=np.uint8)
i = cencoding.NumpyIO(buf)
cencoding.read_rle_bit_packed_hybrid(i, 1, length=0, o=o)
assert (out[:len(data)] == ~data.isnull()).sum()
def test_make_definitions_without_nulls():
for _ in range(100):
out = np.empty(10000, dtype=np.int32)
import math
# import igraph as ig
import random
import time
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import sdeint
from IPython.display import clear_output
from scipy.integrate import odeint
from scipy.interpolate import UnivariateSpline
from scipy.special import expit as sigmoid
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
def plot_trajectories(data, pred, graph, title=[1, 2.1]):
fig, axs = plt.subplots(1, 3, figsize=(10, 2.3))
fig.tight_layout(pad=0.2, w_pad=2, h_pad=3)
axs[0].plot(data.squeeze())
axs[1].plot(pred.squeeze())
i = 1
axs[1].set_title("Iteration = %i" % title[0] + ", " + "Loss = %1.3f" % title[1])
cax = axs[2].matshow(graph)
fig.colorbar(cax)
plt.show()
# plt.savefig('../Giff/fig'+i+'.png')
def compute_derivatives(y, k=4, s=4, t=None):
"""Compute derivatives of univariate stochastic process by interpolating trajectory with
univariate splines.
Args:
t, y (np.ndarray): time indeces t of time series y
Returns:
dy/dt (np.ndarray): derivative of y(t) evaluated at t
"""
if type(t) == type(None):
t = np.arange(y.shape[0])
temp_list = []
for i in range(y.shape[1]):
spl = UnivariateSpline(t, y[:, i], k=k, s=s) # s=0)
derspl = spl.derivative()
temp_list.append(derspl(t))
return np.transpose(
np.array(temp_list)
) # shape is number of time points x number of variables
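# Example usage (hypothetical data): for a (T, p) trajectory sampled at unit time steps, e.g.
#     X, GC = simulate_lorenz_96(p=10, T=500, seed=0)
#     dX = compute_derivatives(X, k=4, s=4)
# dX has the same shape as X (time points x variables).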
def compute_spline(y, k=3, s=0.1):
"""Compute univariate stochastic process by interpolating trajectory with
univariate splines.
Args:
t, y (np.ndarray): time indeces t of time series y
Returns:
dy/dt (np.ndarray): derivative of y(t) evaluated at t
"""
t = np.arange(y.shape[0])
temp_list = []
for i in range(y.shape[1]):
spl = UnivariateSpline(t, y[:, i], k=k, s=s)
temp_list.append(spl(t))
return np.transpose(np.array(temp_list))
def make_var_stationary(beta, radius=0.97):
"""Rescale coefficients of VAR model to make stable."""
p = beta.shape[0]
lag = beta.shape[1] // p
bottom = np.hstack((np.eye(p * (lag - 1)), np.zeros((p * (lag - 1), p))))
beta_tilde = np.vstack((beta, bottom))
eigvals = np.linalg.eigvals(beta_tilde)
max_eig = max(np.abs(eigvals))
nonstationary = max_eig > radius
if nonstationary:
return make_var_stationary(0.95 * beta, radius)
else:
return beta
def simulate_var(p, T, lag, sparsity=0.2, beta_value=1.0, sd=0.1, seed=0):
if seed is not None:
np.random.seed(seed)
# Set up coefficients and Granger causality ground truth.
GC = np.eye(p, dtype=int)
beta = np.eye(p) * beta_value
num_nonzero = int(p * sparsity) - 1
for i in range(p):
choice = np.random.choice(p - 1, size=num_nonzero, replace=False)
choice[choice >= i] += 1
beta[i, choice] = beta_value
GC[i, choice] = 1
beta = np.hstack([beta for _ in range(lag)])
beta = make_var_stationary(beta)
# Generate data.
burn_in = 100
errors = np.random.normal(scale=sd, size=(p, T + burn_in))
X = np.zeros((p, T + burn_in))
X[:, :lag] = errors[:, :lag]
for t in range(lag, T + burn_in):
X[:, t] = np.dot(beta, X[:, (t - lag) : t].flatten(order="F"))
X[:, t] += +errors[:, t - 1]
return X.T[burn_in:], beta, GC
def lorenz(x, t, F=5):
"""Partial derivatives for Lorenz-96 ODE."""
p = len(x)
dxdt = np.zeros(p)
for i in range(p):
dxdt[i] = (x[(i + 1) % p] - x[(i - 2) % p]) * x[(i - 1) % p] - x[i] + F
return dxdt
def simulate_lorenz_96(
p, T, sigma=0.5, F=10.0, delta_t=0.1, sd=0.1, burn_in=1000, seed=None
):
if seed is not None:
np.random.seed(seed)
def GG(x, t):
p = len(x)
return np.diag([sigma] * p)
# Use scipy to solve ODE.
x0 = np.random.normal(scale=0.01, size=p)
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
# X = odeint(lorenz, x0, t, args=(F,))
# X += np.random.normal(scale=sd, size=(T + burn_in, p))
X = sdeint.itoint(lorenz, GG, x0, t)
# Set up Granger causality ground truth.
GC = np.zeros((p, p), dtype=int)
for i in range(p):
GC[i, i] = 1
GC[i, (i + 1) % p] = 1
GC[i, (i - 1) % p] = 1
GC[i, (i - 2) % p] = 1
return X[burn_in:], GC
def lotkavolterra(x, t, r, alpha):
"""Partial derivatives for Lotka-Volterra ODE.
Args:
- r (np.array): vector of self-interaction
- alpha (pxp np.array): matrix of interactions"""
p = len(x)
dxdt = np.zeros(p)
for i in range(p):
dxdt[i] = r[i] * x[i] * (1 - np.dot(alpha[i], x))
return dxdt
def simulate_lotkavolterra(
p, T, r, alpha, delta_t=0.1, sd=0.01, burn_in=1000, seed=None
):
if seed is not None:
np.random.seed(seed)
# Use scipy to solve ODE.
x0 = np.random.normal(scale=0.01, size=p) + 0.25
x0 = np.array([0.0222, 0.0014, 0.0013, 0.0008])
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
X = odeint(lotkavolterra, x0, t, args=(r, alpha,))
X += np.random.normal(scale=sd, size=(T + burn_in, p))
# Set up Granger causality ground truth.
GC = (alpha != 0) * 1
np.fill_diagonal(GC, 1)
return X[burn_in:], GC
def rossler(x, t, a=0, eps=0.1, b=4, d=2):
"""Partial derivatives for rossler ODE."""
p = len(x)
dxdt = np.zeros(p)
dxdt[0] = a * x[0] - x[1]
dxdt[p - 2] = x[(p - 3)]
dxdt[p - 1] = eps + b * x[(p - 1)] * (x[(p - 2)] - d)
for i in range(1, p - 2):
dxdt[i] = np.sin(x[(i - 1)]) - np.sin(x[(i + 1)])
return dxdt
def simulate_rossler(
p,
T,
sigma=0.5,
a=0,
eps=0.1,
b=4,
d=2,
delta_t=0.05,
sd=0.1,
burn_in=1000,
seed=None,
):
if seed is not None:
np.random.seed(seed)
def GG(x, t):
p = len(x)
return np.diag([sigma] * p)
# Use scipy to solve ODE.
x0 = np.random.normal(scale=0.01, size=p)
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
# X = odeint(rossler, x0, t, args=(a,eps,b,d,))
# X += np.random.normal(scale=sd, size=(T + burn_in, p))
X = sdeint.itoint(rossler, GG, x0, t)
# Set up Granger causality ground truth.
GC = np.zeros((p, p), dtype=int)
GC[0, 0] = 1
GC[0, 1] = 1
GC[p - 2, p - 3] = 1
GC[p - 1, p - 1] = 1
GC[p - 1, p - 2] = 1
for i in range(1, p - 2):
# GC[i, i] = 1
GC[i, (i + 1)] = 1
GC[i, (i - 1)] = 1
return 400 * X[burn_in:], GC
def tumor_vaccine(
x,
t,
c2,
t1,
a0=0.1946,
a1=0.3,
c1=100,
c3=300,
delta0=0.00001,
delta1=0.00001,
d=0.0007,
f=0.62,
r=0.01,
):
"""Partial derivatives for rossler ODE."""
dxdt = np.zeros(5)
c0 = 1 / 369
dxdt[0] = (
a0 * x[0] * (1 - c0 * x[0])
- delta0 * x[0] * x[2] / (1 + c1 * x[1])
- delta0 * x[0] * x[4]
)
dxdt[1] = a1 * (x[0] ** 2) / (c2 + x[0] ** 2) - d * x[1]
dxdt[2] = (
f * x[2] * x[0] / (1 + c3 * x[0] * x[1])
- r * x[2]
- delta0 * x[3] * x[2]
- delta1 * x[2]
)
dxdt[3] = r * x[2] - delta1 * x[3]
if math.isclose(t, t1, abs_tol=0.5):
dxdt[4] = 5000 - delta1 * x[4]
else:
dxdt[4] = -delta1 * x[4]
return dxdt
def simulate_tumor(T, c2=300, t1=3, delta_t=0.05, sd=0.1, burn_in=0, seed=None):
if seed is not None:
np.random.seed(seed)
# Use scipy to solve ODE.
x0 = np.zeros(5)
x0[0] = 3
x0[1] = 0
x0[2] = 100
x0[3] = 0
x0[4] = 0
t = np.linspace(0, (T + burn_in) * delta_t, T + burn_in)
X = odeint(tumor_vaccine, x0, t, args=(c2, t1,))
X += np.random.normal(scale=sd, size=(T + burn_in, 5))
# Set up Granger causality ground truth.
p = 5
GC = np.zeros((p, p), dtype=int)
GC[0, 0] = 1
GC[0, 1] = 1
GC[p - 2, p - 3] = 1
GC[p - 1, p - 1] = 1
GC[p - 1, p - 2] = 1
for i in range(1, p - 2):
# GC[i, i] = 1
GC[i, (i + 1)] = 1
GC[i, (i - 1)] = 1
return X[burn_in:], GC
def glycolytic(
x,
t,
k1=0.52,
K1=100,
K2=6,
K3=16,
K4=100,
K5=1.28,
K6=12,
K=1.8,
kappa=13,
phi=0.1,
q=4,
A=4,
N=1,
J0=2.5,
):
"""Partial derivatives for Glycolytic oscillator model.
source:
https://www.pnas.org/content/pnas/suppl/2016/03/23/1517384113.DCSupplemental/pnas.1517384113.sapp.pdf
Args:
- r (np.array): vector of self-interaction
- alpha (pxp np.array): matrix of interactions"""
dxdt = np.zeros(7)
dxdt[0] = J0 - (K1 * x[0] * x[5]) / (1 + (x[5] / k1) ** q)
dxdt[1] = (
(2 * K1 * x[0] * x[5]) / (1 + (x[5] / k1) ** q)
- K2 * x[1] * (N - x[4])
- K6 * x[1] * x[4]
)
dxdt[2] = K2 * x[1] * (N - x[4]) - K3 * x[2] * (A - x[5])
dxdt[3] = K3 * x[2] * (A - x[5]) - K4 * x[3] * x[4] - kappa * (x[3] - x[6])
dxdt[4] = K2 * x[1] * (N - x[4]) - K4 * x[3] * x[4] - K6 * x[1] * x[4]
dxdt[5] = (
(-2 * K1 * x[0] * x[5]) / (1 + (x[5] / k1) ** q)
+ 2 * K3 * x[2] * (A - x[5])
- K5 * x[5]
)
dxdt[6] = phi * kappa * (x[3] - x[6]) - K * x[6]
return dxdt
def simulate_glycolytic(
T, sigma=0.5, delta_t=0.001, sd=0.01, burn_in=0, seed=None, scale=True
):
if seed is not None:
np.random.seed(seed)
def GG(x, t):
p = len(x)
return np.diag([sigma] * p)
x0 = np.zeros(7)
x0[0] = np.random.uniform(0.15, 1.6)
x0[1] = np.random.uniform(0.19, 2.16)
x0[2] = np.random.uniform(0.04, 0.2)
x0[3] = np.random.uniform(0.1, 0.35)
x0[4] = np.random.uniform(0.08, 0.3)
import scipy.io as sio
import matplotlib.pyplot as plt
import numpy as np
from .UCSvis import clipatmax, clipatperc, StokestoRGB
# This provides a quick implementation of StokestoRGB with a provided simulated Stokes array
def generate(out_type = 'Stokes'):
################### Sample Stokes ###################
# out_type should be either 'Stokes' or 'IPA'
S0 = np.tile(np.linspace(0, 1, 100), (100, 1))
S0 *= np.random.uniform(.7, 1, (100, 100)) # add noise
# Example of false polarization signals:
# S1 and S2 are moderately high, but because the resulting AoLP has high variance, a delta mask would suppress it
S1 = np.random.uniform(-.4, .4, (100, 100))
S2 = np.random.uniform(-.4, .4, (100, 100))
import numpy as np
from sklearn.cluster import DBSCAN, KMeans
AVG_EARTH_RADIUS = 6371
def _haversine_array(lat1, lng1, lat2, lng2):
lat = lat2 - lat1
lng = lng2 - lng1
# inputs assumed to be in radians; standard haversine great-circle distance in km
d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2
return AVG_EARTH_RADIUS * 2 * np.arcsin(np.sqrt(d))
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["<NAME> - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "20/04/2017"
import unittest
import numpy as np
from comsyl.autocorrelation.SigmaMatrix import SigmaWaist
from comsyl.autocorrelation.AutocorrelationBuilder import AutocorrelationBuilder
from comsyl.autocorrelation.AutocorrelationBuilderStrategies import BuilderStrategyPython
from comsyl.autocorrelation.PhaseSpaceDensity import PhaseSpaceDensity
from comsyl.math.utils import plotSurface, createGaussian2D, norm2D
class ConvolutionTester(object):
def __init__(self, Sigma_x, A, sigma_x):
self._Sigma_x = Sigma_x
self._A = A
self._sigma_x = sigma_x
max_x = max(Sigma_x, sigma_x) * 10
self._x_coordinates = np.linspace(-max_x, max_x, 300)
self._f = np.zeros(self._x_coordinates.shape[0])
def coordinates(self):
return self._x_coordinates
def evaluateNumerical(self, x_1, x_2):
for i_x, x in enumerate(self._x_coordinates):
self._f[i_x] = np.exp(-x**2 /(2*self._Sigma_x**2)) * \
np.exp(-(x-x_1)**2/(2*self._sigma_x**2)) * \
np.exp(-(x-x_2)**2/(2*self._sigma_x**2))
self._f *= self._A**2
res = np.trapz(y=self._f, x=self._x_coordinates)
return res
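# closed-form value of the same Gaussian product integral, used to cross-check the numerical quadrature above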
def evaluateAnalytical(self, x_1, x_2):
combined_sigma = self._Sigma_x**2 + 0.5 * self._sigma_x**2
res =(self._A ** 2) * np.sqrt(2*np.pi)
res *= np.sqrt(0.5/combined_sigma) * self._Sigma_x * self._sigma_x
res *= np.exp( -(x_1-x_2)**2/(4*self._sigma_x**2) )
res *= np.exp( -(x_1+x_2)**2/(8*combined_sigma) )
return res
class EigenmoderTest(unittest.TestCase):
@unittest.skip
def testGaussian(self):
x = np.linspace(-5,5,100)
y = np.linspace(-5,5,100)
f = createGaussian2D(1.0, 1.0, x, y)
plotSurface(x, y, f, False)
@unittest.skip
def testConvolutionVisual(self):
sigma_matrix = SigmaWaist(sigma_x=3e-6,
sigma_y=1e-6,
sigma_x_prime=5e-6,
sigma_y_prime=5e-6)
x_coordinates = np.linspace(-1e-5,1e-5,50)
y_coordinates = np.linspace(-1e-5,1e-5,50)
wavenumber = 1e+11
e_field = createGaussian2D(sigma_x=1.0e-6,
sigma_y=1.0e-6,
x_coordinates=x_coordinates,
y_coordinates=y_coordinates)
e_field = e_field + 0j
af = AutocorrelationBuilder(N_e=0.0000001,
sigma_matrix=sigma_matrix,
field=e_field,
x_coordinates=x_coordinates,
y_coordinates=y_coordinates,
k=wavenumber)
f = np.zeros((x_coordinates.shape[0],
y_coordinates.shape[0]), dtype=np.complex128)
# damping along y should be higher than along x because sigma_x > sigma_y for the density.
for y in (0.0 ,1.5e-6, -1.5e-6):
for i_x_1, x_1 in enumerate(x_coordinates):
for i_x_2, x_2 in enumerate(x_coordinates):
r_1 = np.array([x_1 + x_2, y])
r_2 = np.array([x_1 - x_2, y])
f[i_x_1, i_x_2] = af.evaluate(r_1, r_2)
plotSurface(x_coordinates, x_coordinates, f, False)
for x in (0.0, 1.5e-6, -1.5e-6):
for i_y_1, y_1 in enumerate(y_coordinates):
for i_y_2, y_2 in enumerate(y_coordinates):
r_1 = np.array([x, y_1 + y_2])
r_2 = np.array([x, y_1 - y_2])
f[i_y_1, i_y_2] = af.evaluate(r_1, r_2)
plotSurface(y_coordinates, y_coordinates, f, False)
@unittest.expectedFailure
def testAnalyticalFormula(self):
tester = ConvolutionTester(Sigma_x=1.0,
A=2.0,
sigma_x=1.0)
for x_1 in tester.coordinates()[::5]:
for x_2 in tester.coordinates()[::3]:
diff = np.abs(tester.evaluateNumerical(x_1, x_2)-tester.evaluateAnalytical(x_1, x_2))
self.assertLess(diff, 1e-10)
def testConvolution(self):
Sigma_x = 0.75e-6
Sigma_y = 1e-6
sigma_matrix = SigmaWaist(sigma_x=Sigma_x,
sigma_y=Sigma_y,
sigma_x_prime=1e-60,
sigma_y_prime=1e-60,
sigma_dd=0.89e-03)
x_coordinates = np.linspace(-0.5e-5, 0.5e-5, 200)
y_coordinates = np.linspace(-0.5e-5, 0.5e-5, 200)
wavenumber = 1e+11
sigma_x = 1.0e-6
sigma_y = 1.0e-6
e_field = createGaussian2D(sigma_x=sigma_x,
sigma_y=sigma_y,
x_coordinates=x_coordinates,
y_coordinates=y_coordinates)
e_field = e_field + 0j
e_field = e_field[np.newaxis, :, :]
tester_x = ConvolutionTester(Sigma_x=Sigma_x,
A=2.0,
sigma_x=sigma_x)
tester_y = ConvolutionTester(Sigma_x=Sigma_y,
A=2.0,
sigma_x=sigma_y)
af = AutocorrelationBuilder(N_e=0.0000001,
sigma_matrix=sigma_matrix,
weighted_fields=e_field,
x_coordinates=x_coordinates,
y_coordinates=y_coordinates,
k=wavenumber)
f = np.zeros((x_coordinates.shape[0],
y_coordinates.shape[0]), dtype=np.complex128)
t = np.zeros_like(f)
# Test along y slices
for y in y_coordinates:
for i_x_1, x_1 in enumerate(x_coordinates):
for i_x_2, x_2 in enumerate(x_coordinates):
r_1 = np.array([x_1 + x_2, y])
import logging
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger('tensorflow')
logger.disabled = True
import tensorflow.compat.v1 as tf
import tensorflow.contrib as contrib
import utils
from tqdm import tqdm
import numpy as np
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("-const", type = float, default = 0.5)
parser.add_argument("-seed", type = int, default = 0)
parser.add_argument("-dataset", type = int, required = True, choices = [0, 1, 2, 3, 4])
parser.add_argument("-inspect", default = False, action = "store_true")
parser.add_argument("-hidden", type = int, default = 128)
parser.add_argument("-c", default = 0.1, type = float)
parser.add_argument("-xi", default = 0.1, type = float)
parser.add_argument("-lr", default = 0.0001, type = float)
parser.add_argument("-nepochs", default = 20, type = int)
args = parser.parse_args()
config = {
'overwrite_name': 'si-h%d-lr%g-c%g-xi%g-dataset%d' % (args.hidden, args.lr, args.c, args.xi, args.dataset),
}
utils.setup_logging(args.seed, config['overwrite_name'])
print("Seed: %d" % args.seed)
session_config = utils.set_seed(args.seed, args.dataset)
n_permute_tasks, it, layer_sizes = utils.setup_dataset(args.dataset, args.inspect)
config = {
**config,
'c': args.c,
'xi': args.xi,
'lr': args.lr,
}
if args.hidden is not None:
layer_sizes = layer_sizes[:1] + [args.hidden for ln in range(len(layer_sizes)-2)] + layer_sizes[-1:]
else:
print('hidden unset')
config['layer_sizes'] = layer_sizes
print(config)
net = utils.SINetwork(
layer_sizes = config['layer_sizes'],
reshape_dims = it.reshape_dims,
seed = args.seed,
session_config = session_config,
it = it,
c = config['c'],
xi = config['xi'],
lr = config['lr'],
)
net.setup()
for n_task in range(n_permute_tasks):
it.switch_task(n_task)
it.i = 0
n_labels = len(np.unique(it.test_y))
division = 255.0 if args.dataset in [0, 1, 2] else 1.0
net.preprocessed_(n_task, {
'train_x': it.train_x.astype('float32') / division,
'test_x': it.test_x.astype('float32') / division,
'train_y': np.eye(n_labels)
"""softmax cross entropy loss layer"""
import numpy as np
class SoftmaxCrossEntropy:
def __init__(self):
self.acc = 0
self.loss = np.zeros(1, dtype="f")
def forward(self, logit, gt):
self.Input = gt
# Softmax
eps = 1e-9 # a small number to prevent dividing by zero
exp_thetaX = np.exp(logit)
self.p = exp_thetaX / (eps + exp_thetaX.sum(axis=1, keepdims=True))
# calculate the accuracy
predict_y = np.argmax(self.p, axis=1)
gt_y = np.argmax(gt, axis=1)
com = predict_y == gt_y
self.acc = np.mean(com)
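# (The original file is truncated here. A plausible continuation, an assumption and
# not taken from the source, would accumulate the mean cross-entropy loss, e.g.
#   self.loss = -np.sum(gt * np.log(self.p + eps)) / logit.shape[0]
# i.e. L = -(1/N) * sum_i sum_c y_ic * log(p_ic), and return it.)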
from unittest import TestCase
import numpy as np
from numpy.testing import assert_almost_equal
from cate.webapi.minheap import MinHeap
class MinHeapTest(TestCase):
def test_init_and_properties(self):
h = MinHeap(np.zeros(4, dtype=np.float), values=None, size=0)
self.assertEqual(h.size, 0)
assert_almost_equal(h.keys, [0., 0., 0., 0.])
assert_almost_equal(h.values, [0, 1, 2, 3])
h = MinHeap(np.array([1], dtype=np.float))
self.assertEqual(h.size, 1)
assert_almost_equal(h.keys, [1.])
assert_almost_equal(h.values, [0])
h = MinHeap(np.array([2, 1], dtype=np.float))
self.assertEqual(h.size, 2)
assert_almost_equal(h.keys, [1., 2.])
assert_almost_equal(h.values, [1, 0])
h = MinHeap(np.array([3, 2, 1], dtype=np.float))
self.assertEqual(h.size, 3)
assert_almost_equal(h.keys, [1., 2., 3.])
assert_almost_equal(h.values, [2, 1, 0])
h = MinHeap(np.array([4, 3, 2, 1], dtype=np.float))
self.assertEqual(h.size, 4)
assert_almost_equal(h.keys, [1., 3., 2., 4.])
assert_almost_equal(h.values, [3, 1, 2, 0])
h = MinHeap(np.array([5, 4, 3, 2, 1], dtype=np.float))
self.assertEqual(h.size, 5)
assert_almost_equal(h.keys, [1., 2., 3., 5., 4.])
assert_almost_equal(h.values, [4, 3, 2, 0, 1])
h = MinHeap(np.array([6, 5, 4, 3, 2, 1], dtype=np.float))
self.assertEqual(h.size, 6)
assert_almost_equal(h.keys, [1., 2., 4., 3., 5., 6.])
assert_almost_equal(h.values, [5, 4, 2, 3, 1, 0])
h = MinHeap(np.array([7, 6, 5, 4, 3, 2, 1], dtype=np.float))
self.assertEqual(h.size, 7)
assert_almost_equal(h.keys, [1., 3., 2., 4., 6., 7., 5.])
assert_almost_equal(h.values, [6, 4, 5, 3, 1, 0, 2])
def test_min(self):
h = MinHeap(np.array([1.2, 1.3, 1.1]), values=np.array([10, 25, 19]))
self.assertAlmostEqual(h.min[0], 1.1)
self.assertEqual(h.min[1], 19)
self.assertEqual(h.min[0], h.min_key)
self.assertEqual(h.min[1], h.min_value)
def test_get(self):
h = MinHeap(np.array([1.2, 1.3, 1.1]), values=np.array([10, 25, 19]))
self.assertAlmostEqual(h.get(2)[0], 1.2)
self.assertEqual(h.get(2)[1], 10)
self.assertEqual(h.get(2)[0], h.get_key(2))
self.assertEqual(h.get(2)[1], h.get_value(2))
def test_add(self):
h = MinHeap(np.zeros(6, dtype=np.float), values=None, size=0)
h.add(4., 0)
self.assertEqual(h.size, 1)
assert_almost_equal(h.keys, [4., 0., 0., 0., 0., 0.])
assert_almost_equal(h.values, [0, 1, 2, 3, 4, 5])
h.add(2., 1)
self.assertEqual(h.size, 2)
assert_almost_equal(h.keys, [2., 4., 0., 0., 0., 0.])
assert_almost_equal(h.values, [1, 0, 2, 3, 4, 5])
h.add(6.)
self.assertEqual(h.size, 3)
assert_almost_equal(h.keys, [2., 4., 6., 0., 0., 0.])
assert_almost_equal(h.values, [1, 0, 2, 3, 4, 5])
h.add(5.)
self.assertEqual(h.size, 4)
assert_almost_equal(h.keys, [2., 4., 6., 5., 0., 0.])
# -*- coding: utf-8 -*-
"""
Introduction
============
Module containing different functions to work with FQ result files.
Usage
=====
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import math
import sys
import json
import numpy as np
import skimage
from nested_lookup import nested_lookup # pip install nested-lookup
from scipy import ndimage
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# Info about the module
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
# Encoder class that allows to have numpy arrays in dictionary
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
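# Example usage (illustrative only, not part of the original module):
#   json.dumps({'spots': np.zeros((2, 3))}, cls=NumpyEncoder)
# serializes the numpy array as a nested list instead of raising a TypeError.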
def colorbar(mappable):
"""
Function to place colorbars next to images and guarantee that they have the
same size.
From: https://joseph-long.com/writing/colorbars/
More info: https://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes
"""
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
return fig.colorbar(mappable, cax=cax)
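# Example usage (illustrative only, not part of the original module):
#   fig, ax = plt.subplots()
#   im = ax.imshow(img_density)
#   colorbar(im)   # colorbar height now matches the image axes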
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
https://gist.github.com/aubricus/f91fb55dc6ba5557fbab06119420dd6a
more info: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
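# Example usage (illustrative only, not part of the original module):
#   n_files = len(file_list)
#   for i_file, file_name in enumerate(file_list):
#       process_file(file_name)
#       print_progress(i_file + 1, n_files, prefix='Progress:', suffix='complete', bar_length=50)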
def read_image(file_name):
"""
Load image from file-name. Returns numpy array and dimensions
"""
img = skimage.io.imread(file_name)
# Move z axis last
img1 = np.moveaxis(img, 0, 2)
return img1, img1.shape
def save_image(file_name, img):
"""
Save different images.
"""
# Save renormalized image
skimage.io.imsave(file_name, img)
print("Filtering image saved as:{}".format(file_name))
def read_FQ_matlab(file_open):
""" Opens FISH-quant result files generated with Matlab (tab-delimited text file).
Args:
file_open (string): string containing the full file name.
Returns:
dictionary containing outlines of cells, and if present the detected spots.
"""
# Open file
with open(file_open, "r") as fh:
data = fh.readlines()
# Strip white space characters
data = [x.strip() for x in data]
# Loop over read-in data
fq_dict = {'cells':{},'file_names':{},'settings':{}}
iLine = 0
while iLine < len(data):
line = data[iLine]
# READ FILE NAMES
if 'IMG_Raw' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'smFISH':img_name[1]})
if 'IMG_Filtered' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'smFISH_filt':img_name[1]})
if 'IMG_DAPI' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'DAPI':img_name[1]})
if 'FILE_settings' in line:
img_name = line.split('\t')
if len(img_name) == 2:
fq_dict['file_names'].update({'settings':img_name[1]})
# READ IMAGE PARAMETERS
if 'PARAMETERS' in line:
iLine += 2
par_microscope = data[iLine].split('\t')
fq_dict['settings'].update({'microscope':{'pix_xy':float(par_microscope[0]),
'pix_z':float(par_microscope[1]),
'RI':float(par_microscope[2]),
'EX':float(par_microscope[3]),
'EM':float(par_microscope[4]),
'NA':float(par_microscope[5]),
'type':par_microscope[6]}})
# New cell
if 'CELL_START' in line:
# Get name of cell
cell_id = line.split('\t')[1]
### POSITION OF CELL
# Read X-POS
iLine += 1
pos_list = (data[iLine].replace('X_POS\t','')).split('\t')
x_pos = [int(s) for s in pos_list]
# Read Y-POS
iLine += 1
pos_list = (data[iLine].replace('Y_POS\t','')).split('\t')
y_pos = [int(s) for s in pos_list]
# Read Z-POS
iLine += 1
pos_list = (data[iLine].replace('Z_POS\t','')).split('\t')
if len(pos_list) > 1:
z_pos = [int(s) for s in pos_list]
else:
z_pos = ['']
fq_dict['cells'].update({cell_id:{'cell_pos':{'x': x_pos,'y': y_pos,'z': z_pos}}})
# New nucleus
if 'Nucleus_START' in line:
# Get name of cell
nuc_id = line.split('\t')[1]
### POSITION OF CELL
# Read X-POS
iLine += 1
pos_list = (data[iLine].replace('X_POS\t','')).split('\t')
x_pos = [int(s) for s in pos_list]
# Read Y-POS
iLine += 1
pos_list = (data[iLine].replace('Y_POS\t','')).split('\t')
y_pos = [int(s) for s in pos_list]
# Read Z-POS
iLine += 1
pos_list = (data[iLine].replace('Z_POS\t','')).split('\t')
if len(pos_list) > 1:
z_pos = [int(s) for s in pos_list]
else:
z_pos = ['']
fq_dict['cells'][cell_id].update({nuc_id:{'nuc_pos':{'x': x_pos,'y': y_pos,'z': z_pos}}})
# Position of detected RNAS
if 'SPOTS_START' in line:
iLine += 2 # Move over header
RNA_prop = []
while not('SPOTS_END' in data[iLine]):
RNA_prop.append([float(s) for s in data[iLine].split('\t')])
iLine += 1
# Assign to dictionary
fq_dict['cells'][cell_id].update({'spots': np.array(RNA_prop)})
# Update line counter
iLine += 1
return fq_dict
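# Example usage (illustrative only; the file name is an assumption, not from the original):
#   fq_dict = read_FQ_matlab('CELL_results.txt')
#   spots_all = get_rna(fq_dict)   # all detected spots as one numpy array
#   pix_xy = fq_dict['settings']['microscope']['pix_xy']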
def get_rna(fq_dict):
"""
Obtain a numpy array with all detected spots in the image. Detection results
are saved in a dictionary (see read_FQ_results_matlab for more details).
"""
RNAall = nested_lookup('spots', fq_dict) # returns list of numpy arrays
for idx,val in enumerate(RNAall):
if idx == 0:
spots_all = np.copy(val)
else:
spots_all = np.append(spots_all,val,axis=0)
return spots_all
def calc_expression_density_plot(fq_dict,img_size,outline_int = 'max',flag_plot = False):
""" Calculate expression density image.
RNA detection results are used to calculate a 2D image where each cell
is displayed with pixel values corresponding to the number of RNAs in the cell.
Args:
fq_dict ('dict'): dictionary containing information about outlines of cells
and nuclei as well as (if present) positions of RNA molecules.
img_size (tuple): specifying the size of the image.
outline_int (string): specifies the pixel value used for cell outlines in the
density plot. 'max' means that the maximum number of RNAs per cell is used;
an integer string means that integer value is used.
flag_plot ('bool'): flag to indicate if results should be plotted.
Returns:
2D numpy arrays (i) image with outlines, (ii) image with
expression density, (iii) image with expression density and outlines.
"""
img_density = np.zeros(img_size, dtype=np.uint16)
img_outline = np.zeros(img_size, dtype=np.uint8)
import argparse
from collections import namedtuple
import datetime
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import random
import time
import yaml
import sys
import meshcat
import meshcat.geometry as g
import meshcat.transformations as tf
import pydrake
from pydrake.systems.analysis import Simulator
from pydrake.common.eigen_geometry import Quaternion, AngleAxis, Isometry3
from pydrake.geometry import (
Box,
HalfSpace,
SceneGraph,
Sphere
)
from pydrake.geometry.render import (
CameraProperties, DepthCameraProperties,
MakeRenderEngineVtk, RenderEngineVtkParams
)
from pydrake.math import RigidTransform, RollPitchYaw
from pydrake.multibody.inverse_kinematics import InverseKinematics
from pydrake.multibody.tree import (
SpatialInertia,
UniformGravityFieldElement,
UnitInertia
)
from pydrake.multibody.plant import (
AddMultibodyPlantSceneGraph,
CoulombFriction,
MultibodyPlant
)
from pydrake.solvers.mathematicalprogram import MathematicalProgram, Solve
from pydrake.systems.framework import BasicVector, DiagramBuilder, LeafSystem
from pydrake.systems.meshcat_visualizer import MeshcatVisualizer
from pydrake.systems.sensors import RgbdSensor
BoxWithLabel = namedtuple('BoxWithLabel', [
'pose', # quatxyz
'dimensions', # width (x), depth (y), height (z)
'label_face', # "[pn][xyz]" e.g. "px" or "nz"
'label_uv']) # uv coords, 0 to 1, on the given face of the label origin
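# Example (illustrative only, not part of the original file): an identity-orientation box at
# the origin, 0.1 x 0.2 x 0.3 m, with the label centered on the +x face.
#   box = BoxWithLabel(pose=np.array([1., 0., 0., 0., 0., 0., 0.]),
#                      dimensions=np.array([0.1, 0.2, 0.3]),
#                      label_face='px',
#                      label_uv=np.array([0.5, 0.5]))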
def get_label_info_from_box_with_label(box):
label_origin = np.zeros(3)
s = 1
if box.label_face[0] == 'n':
s = -1
box_label_dim = ord(box.label_face[1]) - ord('x')
label_origin[box_label_dim] = s
# In the other dims, offset by UV
other_dims = [0, 1, 2]
other_dims.remove(box_label_dim)
label_origin[other_dims[0]] = (box.label_uv[0] - 0.5)*2.0
label_origin[other_dims[1]] = (box.label_uv[1] - 0.5)*2.0
label_origin *= box.dimensions/2.
label_size = np.ones(3) * 0.05
label_size[box_label_dim] = 0.001
return label_origin, label_size
def generate_keypoints_from_box_with_label(box):
assert(isinstance(box, BoxWithLabel))
pts = np.array([[-1., -1., -1., -1, 1., 1., 1., 1.],
[-1., -1., 1., 1., -1., -1., 1., 1.],
[-1., 1., -1., 1., -1., 1., -1., 1.]])
pts = (pts.T * box.dimensions).T / 2.
# Also make a point for the label origin
label_origin, _ = get_label_info_from_box_with_label(box)
pts = np.hstack([pts, label_origin[:, np.newaxis]])
vals = np.zeros(pts.shape[1]).reshape(1, -1)
vals[0, -1] = 1.
quat = box.pose[:4] / np.linalg.norm(box.pose[:4])
pts = RigidTransform(p=box.pose[-3:], quaternion=Quaternion(quat)).multiply(pts)
return pts, vals
def generate_mbp_sg_diagram(seed):
np.random.seed(seed)
# Build up a list of boxes with sizes and label placements
n_boxes = np.random.geometric(0.3) + 1
boxes = []
for box_i in range(n_boxes):
# Generate random pose
xyz = np.array([
np.random.uniform(-0.5, 0.5),
import random
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import warnings
warnings.filterwarnings('ignore')
# reproducibility
random.seed(1996)
np.random.seed(1996)
#load training labels
train_labels = pd.read_csv('../input/train_labels.csv')
training_labels = np.array(list(train_labels.drop("name", axis=1)["invasive"]))
#load training data (already normalized)
training_data = joblib.load("invasive_species_lbp_training_data.pkl")
print("training set size : ", len(training_data))
#shuffling data
training_set = list(zip(training_labels, training_data))
random.shuffle(training_set)
#split training set
train_set, test_set = train_test_split(training_set, test_size=.1)
Y_train, X_train = zip(*train_set)
Y_test, X_test = zip(*test_set)
X_train = np.array(X_train)
Y_train = np.array(Y_train)
import random
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from warnings import warn
# For repeatability
random.seed(2018)
def seq2bit(seq):
seq = np.array(seq, dtype=np.uint8)
"""
Title:
anylogo.py
Creation Date:
2017-07-31
Author(s):
<NAME>
Purpose:
This file contains a variety of functions used to generate sequence logos.
License: MIT
Copyright (c) 2017 <NAME> group @ California Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.MIT
"""
from __future__ import division
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import glob
import re
import pdb
import string
import os
import NB_sortseq_utils as utils
from matplotlib.textpath import TextPath
from matplotlib.patches import PathPatch
from matplotlib.font_manager import FontProperties, findSystemFonts
from matplotlib.colors import to_rgba
from matplotlib import font_manager
# Set constants
SMALL = 1E-6
#DEFAULT_FONT = 'Arial Rounded Bold'
DEFAULT_FONT = 'Lucida Sans Unicode'
# Build font_file_dict
font_file_list = \
findSystemFonts(fontpaths=None, fontext='ttf')
FONT_FILE_DICT = {}
for font_file in font_file_list:
base_name = os.path.basename(font_file)
font_name = os.path.splitext(base_name)[0]
FONT_FILE_DICT[str(font_name)] = font_file
FONTS = FONT_FILE_DICT.keys()
FONTS = sorted(FONTS)
class Box:
def __init__(self, xlb, xub, ylb, yub):
self.xlb = xlb
self.xub = xub
self.ylb = ylb
self.yub = yub
self.x = xlb
self.y = ylb
self.w = xub-xlb
self.h = yub-ylb
self.bounds = (xlb, xub, ylb, yub)
class Character:
def __init__(self, c, x, y, w, h, color,
font_name=DEFAULT_FONT,
flip=False,
shade=1,
alpha=1):
assert w > 0
assert h > 0
self.c = c
self.box = Box(x,x+w,y,y+h)
self.font_name = font_name
self.flip = flip
# Set color
try:
self.color = np.array(to_rgba(color))*\
np.array([shade,shade,shade,1])
except:
assert False, 'Error! Unable to interpret color %s'%repr(color)
# Set tranparency
self.color[3] = alpha
def draw(self,ax):
# Define character bounding box
bbox = list(self.box.bounds)
if self.flip:
bbox[2:] = bbox[2:][::-1]
# Draw character
put_char_in_box(ax, self.c, bbox, \
facecolor=self.color,
font_name=self.font_name)
# Logo base class
class Logo:
def __init__(self,logo_set=False):
self.logo_set = logo_set
def draw(self,ax):
assert self.logo_set, "Error: can't plot because logo is not set yet."
# Draw floor
plt.axhline(0,linewidth=self.floor_line_width,color='k',zorder=-1)
# Draw characters
for char in self.char_list:
char.draw(ax)
# Logo-specific formatting
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
ax.set_xticks(self.xticks)
ax.set_yticks(self.yticks)
ax.set_yticklabels(self.yticklabels)
ax.set_xticklabels(self.xticklabels,rotation=90)
ax.set_xlabel(self.xlabel)
ax.set_ylabel(self.ylabel)
# Standard formatting
ax.xaxis.set_tick_params(width=0,length=0)
plt.box('off')
# Information logo class
class InformationLogo(Logo):
def __init__(self, prob_df, bg_df, color_dict, ylabel=None,
use_transparency=False,
font_name=DEFAULT_FONT, floor_line_width=.5):
df = prob_df.copy()
info_vec = prob_df.values*np.log2(prob_df.values/bg_df.values)
df.loc[:,:] = prob_df.values*info_vec.sum(axis=1)[:,np.newaxis]
assert all(np.ravel(df.values)>=0)
char_list, box = compute_logo_characters(
df=df,
stack_order='big_on_top',
color_dict=color_dict,
font_name=font_name,
use_transparency=use_transparency)
assert np.isclose(box.ylb,0),\
'Error: box.ylb=%f is not zero.'%box.ylb
self.signed_heights_df = df
self.floor_line_width = floor_line_width
self.font_name = font_name
self.prob_df = prob_df.copy()
self.bg_df = bg_df.copy()
self.box = box
self.char_list = char_list
self.xlim = [box.xlb, box.xub]
self.xticks = range(
int(np.ceil(self.xlim[0])),
int(np.floor(self.xlim[1]))+1
)
self.xticklabels = ['%d'%x for x in self.xticks]
self.xlabel = 'position'
self.ylim = [-.01, max(2,box.yub)+.01]
self.yticks = range(
int(np.ceil(self.ylim[0])),
int(np.floor(self.ylim[1]))+1
)
self.yticklabels = ['%d'%y for y in self.yticks]
self.ylabel = 'information\n(bits)' if (ylabel is None) else ylabel
# Register that logo has been set
Logo.__init__(self,logo_set=True)
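# Note added for clarity (not part of the original file): for InformationLogo the
# per-position stack height is the information content
#   I_i = sum_c p_ic * log2(p_ic / bg_c)   [bits]
# and each character c at position i is drawn with height p_ic * I_i, which is what
# the df assigned to signed_heights_df above stores.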
# Probability logo class
class ProbabilityLogo(Logo):
def __init__(self, prob_df, color_dict, ylabel=None,
use_transparency=True,
font_name=DEFAULT_FONT, floor_line_width=.5):
df = prob_df.copy()
assert all(np.ravel(df.values)>=0)
char_list, box = compute_logo_characters(
df=df,
stack_order='small_on_top',
color_dict=color_dict,
font_name=font_name,
max_alpha_val=1.0,
use_transparency=use_transparency)
assert np.isclose(box.ylb,0),\
'Error: box.ylb=%f is not zero.'%box.ylb
assert np.isclose(box.yub,1),\
'Error: box.yub=%f is not one.'%box.yub
self.signed_heights_df = df
self.floor_line_width = floor_line_width
self.font_name = font_name
self.prob_df = prob_df.copy()
self.box = box
self.char_list = char_list
self.xlim = [box.xlb, box.xub]
self.xticks = range(
int(np.ceil(self.xlim[0])),
int(np.floor(self.xlim[1]))+1
)
self.xticklabels = ['%d'%x for x in self.xticks]
self.xlabel = 'position'
self.ylim = [-.01, 1.01]
self.yticks = [0, .5, 1]
self.yticklabels = ['%.1f'%y for y in self.yticks]
self.ylabel = 'probability' if (ylabel is None) else ylabel
# Register that logo has been set
Logo.__init__(self,logo_set=True)
# Effect logo class
class EffectLogo(Logo):
def __init__(self, effect_df, color_dict, ylabel=None,
use_transparency=True,
font_name=DEFAULT_FONT, floor_line_width=.5):
df = effect_df.copy()
char_list, box = compute_logo_characters(
df=df,
stack_order='big_on_top',
color_dict=color_dict,
font_name=font_name,
use_transparency=use_transparency,
neg_shade=.5,
neg_flip=True
)
self.signed_heights_df = df
self.floor_line_width = floor_line_width
self.font_name = font_name
self.effect_df = effect_df.copy()
self.box = box
self.char_list = char_list
self.xlim = [box.xlb, box.xub]
self.xticks = range(
int(np.ceil(self.xlim[0])),
int(np.floor(self.xlim[1]))+1
)
self.xticklabels = ['%d'%x for x in self.xticks]
self.xlabel = 'position'
self.ylim = [box.ylb, box.yub]
self.yticks = range(
int(np.ceil(self.ylim[0])),
int(np.floor(self.ylim[1]))+1
)
self.yticklabels = ['%d'%y for y in self.yticks]
self.ylabel = 'effect' if (ylabel is None) else ylabel
# Register that logo has been set
Logo.__init__(self,logo_set=True)
# Enrichment logo class
class EnrichmentLogo(Logo):
def __init__(self, prob_df, bg_df, color_dict,
ylabel=None, use_transparency=True,
font_name=DEFAULT_FONT, floor_line_width=.5):
df = prob_df.copy()
df.loc[:,:] = np.log2(prob_df.values/bg_df.values)
char_list, box = compute_logo_characters(
df=df,
stack_order='big_on_top',
color_dict=color_dict,
font_name=font_name,
use_transparency=use_transparency,
neg_shade=.5,
neg_flip=True
)
self.signed_heights_df = df
self.floor_line_width = floor_line_width
self.font_name = font_name
self.prob_df = prob_df.copy()
self.bg_df = bg_df.copy()
self.box = box
self.char_list = char_list
self.xlim = [box.xlb, box.xub]
self.xticks = range(
int(np.ceil(self.xlim[0])),
from pygame.locals import *
from random import randint
import pygame
import time
import numpy as np
import datetime
import agents
### WALL ###
class Wall:
def __init__(self,game_size,box_size, random_wall = False):
self.box_size = box_size
self.game_size = game_size
self.X, self.Y = self.make_wall(game_size[0]-1, game_size[1]-1, random_wall)
def make_wall(self, x_size,y_size, random_wall = True):
x = np.concatenate([np.array(range(x_size)),np.array(range(x_size)),
np.repeat(0,y_size), np.repeat(x_size,y_size)])
y = np.concatenate([np.repeat(0,y_size), np.repeat(y_size, x_size),
np.array(range(y_size)), np.array(range(y_size))])
if random_wall:
x = np.append(x, [np.random.randint(x_size) for _ in range(5)])
y = np.append(y, [np.random.randint(y_size) for _ in range(5)])
return x, y
def draw(self, surface, image):
for x, y in zip(self.X,self.Y):
surface.blit(image,(x*self.box_size, y*self.box_size))
class Apple:
x = 0
y = 0
def __init__(self,x,y, game_size, box_size):
self.game_size = game_size
if x is None:
self.x = randint(1,self.game_size[0]-2)
else:
self.x = x
if y is None:
self.y = randint(1,self.game_size[0]-2)
else:
self.y = y
self.box_size = box_size
self.board = np.zeros(game_size)
def draw(self, surface, image):
surface.blit(image,(self.x*self.box_size, self.y*self.box_size))
### PLAYER ###
class Player:
direction = 0
length = 2
def __init__(self, length, game_size, box_size):
xh = randint(3,game_size[1]-2)
self.x = [xh,xh-1,xh-2]
yh = randint(2,game_size[0]-2)
self.y = [yh,yh,yh]
self.length = length
self.game_size = game_size
self.box_size = box_size
for i in range(0,2000):
self.x.append(-1)
self.y.append(-1)
def update(self):
# update previous positions
for i in range(self.length-1,0,-1):
self.x[i] = self.x[i-1]
self.y[i] = self.y[i-1]
# update position of head of snake
if self.direction == 0:
self.x[0] = self.x[0] + 1
if self.direction == 1:
self.x[0] = self.x[0] - 1
if self.direction == 2:
self.y[0] = self.y[0] - 1
if self.direction == 3:
self.y[0] = self.y[0] + 1
def moveRight(self):
self.direction = 0
def moveLeft(self):
self.direction = 1
def moveUp(self):
self.direction = 2
def moveDown(self):
self.direction = 3
def draw(self, surface, image, image_head = None):
if image_head is None:
image_head = image
for i in range(1):
surface.blit(image_head,(self.x[i]*self.box_size,self.y[i]*self.box_size))
for i in range(1,self.length):
surface.blit(image,(self.x[i]*self.box_size,self.y[i]*self.box_size))
class Snake:
# Render parameters
windowHeight = 1000
windowWidth = 1000
box_size = 20
def __init__(self, render = True, game_size = (10,10), time_reward = -0.02):
self.game_size = game_size
self.render = render
self.time_reward = time_reward
if render:
pygame.init()
self._display_surf = pygame.display.set_mode((self.windowWidth,self.windowHeight), pygame.HWSURFACE)
pygame.display.set_caption('snake game to train agents in')
# Rendering stuff
apple_img = 130*np.ones((self.box_size-1,self.box_size-1))
wall_img = 50*np.ones((self.box_size-1,self.box_size-1))
from unittest import TestCase
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .distortion import (perm_adj, perm_features, filter_adj, filter_features,
gray_color_threshold, degree_threshold,
area_threshold)
class DistortionTest(TestCase):
def test_perm_adj(self):
adj = [[0, 2, 1, 0], [2, 0, 0, 1], [1, 0, 0, 2], [0, 1, 2, 0]]
adj = sp.coo_matrix(adj)
perm = np.array([2, 1, 3, 0])
expected = [[0, 0, 2, 1], [0, 0, 1, 2], [2, 1, 0, 0], [1, 2, 0, 0]]
assert_equal(perm_adj(adj, perm).toarray(), expected)
# Add fake nodes.
perm = np.array([3, 2, 0, 4, 1, 5])
expected = [[0, 2, 0, 0, 1, 0], [2, 0, 1, 0, 0, 0], [0, 1, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0], [1, 0, 2, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
assert_equal(perm_adj(adj, perm).toarray(), expected)
def test_perm_features(self):
features = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
perm = np.array([2, 1, 3, 0])
expected = [[5, 6], [3, 4], [7, 8], [1, 2]]
assert_equal(perm_features(features, perm), expected)
# Add fake nodes.
perm = np.array([3, 2, 0, 4, 1, 5])
expected = [[7, 8], [5, 6], [1, 2], [0, 0], [3, 4], [0, 0]]
assert_equal(perm_features(features, perm), expected)
def test_filter_adj(self):
adj = [[0, 2, 1, 0], [2, 0, 0, 1], [1, 0, 0, 2], [0, 1, 2, 0]]
adj = sp.coo_matrix(adj)
nodes = np.array([0, 1, 3])
expected = [[0, 2, 0], [2, 0, 1], [0, 1, 0]]
assert_equal(filter_adj(adj, nodes).toarray(), expected)
def test_filter_features(self):
features = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
nodes = np.array([1, 3])
expected = [[3, 4], [7, 8]]
assert_equal(filter_features(features, nodes), expected)
def test_gray_color_threshold(self):
features = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
nodes = gray_color_threshold(None, features, 3)
expected = [1, 2, 3]
assert_equal(nodes, expected)
def test_degree_threshold(self):
adj = [[0, 1, 1, 1, 1], [1, 0, 1, 0, 0], [1, 1, 0, 1, 0],
[1, 0, 1, 0, 1], [1, 0, 0, 1, 0]]
adj = sp.coo_matrix(adj)
nodes = degree_threshold(adj, None, 3)
expected = [1, 2, 3, 4]
assert_equal(nodes, expected)
def test_area_threshold(self):
features = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
"""Predicting Module."""
from collections import OrderedDict
from typing import List
import click
import pickle as pkl
import numpy as np
import pandas as pd
from albumentations import Compose
from PIL import Image
from pytesseract import image_to_string
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square, convex_hull_image
from skimage.transform import resize
from skimage.util import invert
from tablenet import TableNetModule
class Predict:
"""Predict images using pre-trained model."""
def __init__(self, checkpoint_path: str, transforms: Compose, threshold: float = 0.5, per: float = 0.005):
"""Predict images using pre-trained TableNet model.
Args:
checkpoint_path (str): model weights path.
transforms (Optional[Compose]): Compose object from albumentations used for pre-processing.
threshold (float): threshold to consider the value as correctly classified.
per (float): Minimum area for tables and columns to be considered.
"""
self.transforms = transforms
self.threshold = threshold
self.per = per
self.model = TableNetModule.load_from_checkpoint(checkpoint_path)
self.model.eval()
self.model.requires_grad_(False)
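# Example usage (illustrative only; checkpoint and image paths are assumptions, not from
# the original file):
#   predictor = Predict(checkpoint_path="tablenet.ckpt", transforms=my_albumentations_compose)
#   tables = predictor.predict(Image.open("scanned_page.png"))  # list of DataFrames per the docstring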
def predict(self, image: Image) -> List[pd.DataFrame]:
"""Predict a image table values.
Args:
image (Image): PIL.Image to
Returns (List[pd.DataFrame]): Tables in pandas DataFrame format.
"""
processed_image = self.transforms(image=np.array(image))["image"]
table_mask, column_mask = self.model.forward(processed_image.unsqueeze(0))
print("dumping..")
pkl.dump({"table": table_mask, "column": column_mask}, open("dump.pkl", "wb"))
print("DUMPED")
# table_mask = self._apply_threshold(table_mask)
# column_mask = self._apply_threshold(column_mask)
# segmented_tables = self._process_tables(self._segment_image(table_mask))
# tables = []
# for table in segmented_tables:
# segmented_columns = self._process_columns(self._segment_image(column_mask * table))
# if segmented_columns:
# cols = []
# for column in segmented_columns.values():
# cols.append(self._column_to_dataframe(column, image))
# tables.append(pd.concat(cols, ignore_index=True, axis=1))
# return tables
def _apply_threshold(self, mask):
mask = mask.squeeze(0).squeeze(0).numpy() > self.threshold
return mask.astype(int)
def _process_tables(self, segmented_tables):
width, height = segmented_tables.shape
tables = []
for i in np.unique(segmented_tables)[1:]:
table = np.where(segmented_tables == i, 1, 0)
if table.sum() > height * width * self.per:
tables.append(convex_hull_image(table))
return tables
def _process_columns(self, segmented_columns):
width, height = segmented_columns.shape
cols = {}
for j in np.unique(segmented_columns):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import collections
from abc import abstractmethod
from lpot.utils.utility import LazyImport, singleton
torchvision = LazyImport('torchvision')
torch = LazyImport('torch')
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')
cv2 = LazyImport('cv2')
class Transforms(object):
def __init__(self, process, concat_general=True):
transform_map = {"preprocess": self._get_preprocess,
"postprocess": self._get_postprocess,
"general": self._get_general, }
self.transforms = transform_map[process]()
# if set True users can use general transform in both preprocess or postprocess
if concat_general:
self.transforms.update(transform_map['general']())
@abstractmethod
def _get_preprocess(self):
raise NotImplementedError
@abstractmethod
def _get_postprocess(self):
raise NotImplementedError
@abstractmethod
def _get_general(self):
raise NotImplementedError
class TensorflowTransforms(Transforms):
def _get_preprocess(self):
preprocess = {
"DecodeImage": TensorflowWrapFunction(tf.io.decode_jpeg),
"EncodeJpeg": TensorflowWrapFunction(tf.io.encode_jpeg),
}
# update the registry transforms
preprocess.update(TENSORFLOW_TRANSFORMS["preprocess"])
return preprocess
def _get_postprocess(self):
postprocess = {}
postprocess.update(TENSORFLOW_TRANSFORMS["postprocess"])
return postprocess
def _get_general(self):
general = {}
general.update(TENSORFLOW_TRANSFORMS["general"])
return general
class MXNetTransforms(Transforms):
def _get_preprocess(self):
preprocess = {
'ToTensor': PytorchMxnetWrapFunction(
mx.gluon.data.vision.transforms.ToTensor),
'CenterCrop': PytorchMxnetWrapFunction(
mx.gluon.data.vision.transforms.CenterCrop),
'RandomHorizontalFlip': PytorchMxnetWrapFunction(
mx.gluon.data.vision.transforms.RandomFlipLeftRight),
'RandomVerticalFlip': PytorchMxnetWrapFunction(
mx.gluon.data.vision.transforms.RandomFlipTopBottom),
}
preprocess.update(MXNET_TRANSFORMS["preprocess"])
return preprocess
def _get_postprocess(self):
postprocess = {}
postprocess.update(MXNET_TRANSFORMS["postprocess"])
return postprocess
def _get_general(self):
general = {
'Compose': mx.gluon.data.vision.transforms.Compose,
'Cast': PytorchMxnetWrapFunction(
mx.gluon.data.vision.transforms.Cast),
}
general.update(MXNET_TRANSFORMS["general"])
return general
class PyTorchTransforms(Transforms):
def _get_preprocess(self):
preprocess = {
"ToTensor": PytorchMxnetWrapFunction(
torchvision.transforms.ToTensor),
"ToPILImage": PytorchMxnetWrapFunction(
torchvision.transforms.ToPILImage),
"CenterCrop": PytorchMxnetWrapFunction(
torchvision.transforms.CenterCrop),
"RandomCrop": PytorchMxnetWrapFunction(
torchvision.transforms.RandomCrop),
"RandomHorizontalFlip": PytorchMxnetWrapFunction(
torchvision.transforms.RandomHorizontalFlip),
"RandomVerticalFlip": PytorchMxnetWrapFunction(
torchvision.transforms.RandomVerticalFlip),
"Pad": PytorchMxnetWrapFunction(
torchvision.transforms.Pad),
"ColorJitter": PytorchMxnetWrapFunction(
torchvision.transforms.ColorJitter),
}
preprocess.update(PYTORCH_TRANSFORMS["preprocess"])
return preprocess
def _get_postprocess(self):
postprocess = {}
postprocess.update(PYTORCH_TRANSFORMS["postprocess"])
return postprocess
def _get_general(self):
general = {
"Compose": torchvision.transforms.Compose,
}
general.update(PYTORCH_TRANSFORMS["general"])
return general
class ONNXRTQLTransforms(Transforms):
def _get_preprocess(self):
preprocess = {}
preprocess.update(ONNXRT_QL_TRANSFORMS["preprocess"])
return preprocess
def _get_postprocess(self):
postprocess = {}
postprocess.update(ONNXRT_QL_TRANSFORMS["postprocess"])
return postprocess
def _get_general(self):
general = {}
general.update(ONNXRT_QL_TRANSFORMS["general"])
return general
class ONNXRTITTransforms(Transforms):
def _get_preprocess(self):
preprocess = {}
preprocess.update(ONNXRT_IT_TRANSFORMS["preprocess"])
return preprocess
def _get_postprocess(self):
postprocess = {}
postprocess.update(ONNXRT_IT_TRANSFORMS["postprocess"])
return postprocess
def _get_general(self):
general = {}
general.update(ONNXRT_IT_TRANSFORMS["general"])
return general
framework_transforms = {"tensorflow": TensorflowTransforms,
"mxnet": MXNetTransforms,
"pytorch": PyTorchTransforms,
"pytorch_ipex": PyTorchTransforms,
"onnxrt_qlinearops": ONNXRTQLTransforms,
"onnxrt_integerops": ONNXRTITTransforms}
# transform registry will register transforms into these dicts
TENSORFLOW_TRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
MXNET_TRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
PYTORCH_TRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
ONNXRT_QL_TRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
ONNXRT_IT_TRANSFORMS = {"preprocess": {}, "postprocess": {}, "general": {}}
registry_transforms = {"tensorflow": TENSORFLOW_TRANSFORMS,
"mxnet": MXNET_TRANSFORMS,
"pytorch": PYTORCH_TRANSFORMS,
"pytorch_ipex": PYTORCH_TRANSFORMS,
"onnxrt_qlinearops": ONNXRT_QL_TRANSFORMS,
"onnxrt_integerops": ONNXRT_IT_TRANSFORMS}
class TRANSFORMS(object):
def __init__(self, framework, process):
assert framework in ("tensorflow", "pytorch", "pytorch_ipex", "onnxrt_qlinearops",
"onnxrt_integerops", "mxnet"), \
"framework support tensorflow pytorch mxnet onnxrt"
assert process in ("preprocess", "postprocess",
"general"), "process support preprocess postprocess, general"
self.transforms = framework_transforms[framework](process).transforms
self.framework = framework
self.process = process
def __getitem__(self, transform_type):
assert transform_type in self.transforms.keys(), "transform support {}".\
format(self.transforms.keys())
return self.transforms[transform_type]
def register(self, name, transform_cls):
assert name not in registry_transforms[self.framework][self.process].keys(), \
'register transform name already exists.'
registry_transforms[self.framework][self.process].update({name: transform_cls})
def transform_registry(transform_type, process, framework):
"""The class decorator used to register all transform subclasses.
Args:
transform_type (str): Transform registration name
process (str): one of 3 processes: 'preprocess', 'postprocess', 'general'
framework (str): one of 4 frameworks: 'tensorflow', 'pytorch', 'mxnet', 'onnxrt'
cls (class): The class to register.
Returns:
cls: The registered class.
"""
def decorator_transform(cls):
for single_framework in [fwk.strip() for fwk in framework.split(',')]:
assert single_framework in [
"tensorflow",
"mxnet",
"pytorch",
"pytorch_ipex",
"onnxrt_qlinearops",
"onnxrt_integerops"
], "The framework support tensorflow mxnet pytorch onnxrt"
if transform_type in registry_transforms[single_framework][process].keys():
raise ValueError('Cannot have two transforms with the same name')
registry_transforms[single_framework][process][transform_type] = cls
return cls
return decorator_transform
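# Example (illustrative only, not part of the original file): registering a user-defined
# transform so it becomes reachable via TRANSFORMS('tensorflow', 'preprocess')['MyScale']:
#
#   @transform_registry(transform_type="MyScale", process="preprocess", framework="tensorflow")
#   class MyScaleTransform(BaseTransform):
#       def __init__(self, factor=1.0):
#           self.factor = factor
#       def __call__(self, sample):
#           image, label = sample
#           return (image * self.factor, label)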
class BaseTransform(object):
"""The base class for transform. __call__ method is needed when write user specific transform
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError
class TensorflowWrapFunction(object):
def __init__(self, transform_func):
self.transform_func = transform_func
def __call__(self, **kwargs):
return TensorflowTransform(self.transform_func, **kwargs)
class TensorflowTransform(BaseTransform):
def __init__(self, transform_func, **kwargs):
self.kwargs = kwargs
self.transform_func = transform_func
def __call__(self, sample):
image, label = sample
image = self.transform_func(image, **self.kwargs)
return (image, label)
class PytorchMxnetWrapFunction(object):
def __init__(self, transform_func):
self.transform_func = transform_func
def __call__(self, **args):
return PytorchMxnetTransform(self.transform_func(**args))
class PytorchMxnetTransform(BaseTransform):
def __init__(self, transform_func):
self.transform_func = transform_func
def __call__(self, sample):
image, label = sample
image = self.transform_func(image)
return (image, label)
interpolation_map = {
'nearest': cv2.INTER_NEAREST,
'bilinear': cv2.INTER_LINEAR,
'bicubic': cv2.INTER_CUBIC,
}
interpolation_pytorch_map = {
'nearest': 0,
'bilinear': 2,
'bicubic': 3,
}
interpolation_mxnet_map = {
'nearest': 0,
'bilinear': 1,
'bicubic': 2,
}
@transform_registry(transform_type="Compose", process="general", \
framework="onnxrt_qlinearops, onnxrt_integerops, tensorflow")
class ComposeTransform(BaseTransform):
"""Composes several transforms together.
Args:
transform_list (list of Transform objects): list of transforms to compose
Returns:
sample (tuple): tuple of processed image and label
"""
def __init__(self, transform_list):
self.transform_list = transform_list
def __call__(self, sample):
for transform in self.transform_list:
sample = transform(sample)
return sample
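# Example (illustrative only, not part of the original file): chaining registered transforms
# on an (image, label) sample tuple:
#
#   preprocess = TRANSFORMS('tensorflow', 'preprocess')
#   pipeline = ComposeTransform([preprocess['Resize'](size=256),
#                                preprocess['CenterCrop'](size=224)])
#   image, label = pipeline((raw_image, raw_label))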
@transform_registry(transform_type="CropToBoundingBox", process="preprocess", \
framework="pytorch")
class CropToBoundingBox(BaseTransform):
"""Crops an image to a specified bounding box.
Args:
offset_height (int): Vertical coordinate of the top-left corner of the result in the input
offset_width (int): Horizontal coordinate of the top-left corner of the result in the input
target_height (int): Height of the result
target_width (int): Width of the result
Returns:
tuple of processed image and label
"""
def __init__(self, offset_height, offset_width, target_height, target_width):
self.offset_height = offset_height
self.offset_width = offset_width
self.target_height = target_height
self.target_width = target_width
def __call__(self, sample):
image, label = sample
image = torchvision.transforms.functional.crop(
image,
self.offset_height,
self.offset_width,
self.target_height,
self.target_width)
return (image, label)
@transform_registry(transform_type="CropToBoundingBox", process="preprocess", \
framework="mxnet")
class MXNetCropToBoundingBox(CropToBoundingBox):
"""Crops an image to a specified bounding box.
Args:
offset_height (int): Vertical coordinate of the top-left corner of the result in the input
offset_width (int): Horizontal coordinate of the top-left corner of the result in the input
target_height (int): Height of the result
target_width (int): Width of the result
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
image = mx.image.fixed_crop(
image,
self.offset_height,
self.offset_width,
self.target_height,
self.target_width)
return (image, label)
@transform_registry(transform_type="CropToBoundingBox", process="preprocess", \
framework="onnxrt_qlinearops, onnxrt_integerops")
class ONNXRTCropToBoundingBox(CropToBoundingBox):
"""Crops an image to a specified bounding box.
Args:
offset_height (int): Vertical coordinate of the top-left corner of the result in the input
offset_width (int): Horizontal coordinate of the top-left corner of the result in the input
target_height (int): Height of the result
target_width (int): Width of the result
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
image = image[self.offset_height : self.offset_height+self.target_height,
self.offset_width : self.offset_width+self.target_width, :]
return (image, label)
@transform_registry(transform_type="CropToBoundingBox", process="preprocess", \
framework="tensorflow")
class TensorflowCropToBoundingBox(CropToBoundingBox):
"""Crops an image to a specified bounding box.
Args:
offset_height (int): Vertical coordinate of the top-left corner of the result in the input
offset_width (int): Horizontal coordinate of the top-left corner of the result in the input
target_height (int): Height of the result
target_width (int): Width of the result
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
if isinstance(image, tf.Tensor):
image = tf.image.crop_to_bounding_box(image, self.offset_height,
self.offset_width, self.target_height, self.target_width)
else:
image = image[self.offset_height : self.offset_height+self.target_height,
self.offset_width : self.offset_width+self.target_width, :]
return (image, label)
@transform_registry(transform_type="Transpose", process="preprocess", \
framework="onnxrt_qlinearops, onnxrt_integerops")
class Transpose(BaseTransform):
"""Transpose image according to perm.
Args:
perm (list): A permutation of the dimensions of input image
Returns:
tuple of processed image and label
"""
def __init__(self, perm):
self.perm = perm
def __call__(self, sample):
image, label = sample
assert len(image.shape) == len(self.perm), "Image rank doesn't match Perm rank"
image = np.transpose(image, axes=self.perm)
return (image, label)
@transform_registry(transform_type="Transpose", process="preprocess", framework="tensorflow")
class TensorflowTranspose(Transpose):
"""Transpose image according to perm.
Args:
perm (list): A permutation of the dimensions of input image
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
assert len(image.shape) == len(self.perm), "Image rank doesn't match Perm rank"
if isinstance(image, tf.Tensor):
image = tf.transpose(image, perm=self.perm)
else:
image = np.transpose(image, axes=self.perm)
return (image, label)
@transform_registry(transform_type="Transpose", process="preprocess", framework="mxnet")
class MXNetTranspose(Transpose):
"""Transpose image according to perm.
Args:
perm (list): A permutation of the dimensions of input image
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
assert len(image.shape) == len(self.perm), "Image rank doesn't match Perm rank"
image = mx.ndarray.transpose(image, self.perm)
return (image, label)
@transform_registry(transform_type="Transpose", process="preprocess", framework="pytorch")
class PyTorchTranspose(Transpose):
"""Transpose image according to perm.
Args:
perm (list): A permutation of the dimensions of input image
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
assert len(image.shape) == len(self.perm), "Image rank doesn't match Perm rank"
image = image.permute(self.perm)
return (image, label)
@transform_registry(transform_type="RandomVerticalFlip", process="preprocess", \
framework="onnxrt_qlinearops, onnxrt_integerops")
class RandomVerticalFlip(BaseTransform):
"""Vertically flip the given image randomly.
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
if np.random.rand(1)[0] > 0.5:
image = np.flipud(image)
return (image, label)
@transform_registry(transform_type="RandomVerticalFlip", process="preprocess", \
framework="tensorflow")
class TensorflowRandomVerticalFlip(BaseTransform):
"""Vertically flip the given image randomly.
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
if isinstance(image, tf.Tensor):
image = tf.image.random_flip_up_down(image)
else:
if np.random.rand(1)[0] > 0.5:
image = np.flipud(image)
return (image, label)
@transform_registry(transform_type="RandomHorizontalFlip", process="preprocess", \
framework="onnxrt_qlinearops, onnxrt_integerops")
class RandomHorizontalFlip(BaseTransform):
"""Horizontally flip the given image randomly.
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
if np.random.rand(1)[0] > 0.5:
image = np.fliplr(image)
return (image, label)
@transform_registry(transform_type="RandomHorizontalFlip", process="preprocess", \
framework="tensorflow")
class TensorflowRandomHorizontalFlip(BaseTransform):
"""Horizontally flip the given image randomly.
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
image, label = sample
if isinstance(image, tf.Tensor):
image = tf.image.random_flip_left_right(image)
else:
if np.random.rand(1)[0] > 0.5:
image = np.fliplr(image)
return (image, label)
@transform_registry(transform_type="ToArray", process="preprocess", \
framework="onnxrt_qlinearops, onnxrt_integerops, tensorflow, pytorch, mxnet")
class ToArray(BaseTransform):
"""Convert PIL Image or NDArray to numpy array.
Returns:
tuple of processed image and label
"""
def __call__(self, sample):
from PIL import Image
image, label = sample
if isinstance(image, Image.Image):
image = np.array(image)
elif isinstance(image, mx.ndarray.NDArray): # pylint: disable=no-member
image = image.asnumpy()
else:
raise ValueError("Unknown image type!")
return (image, label)
np_dtype_map = {'int8': np.int8, 'uint8': np.uint8, 'complex64': np.complex64,
'uint16': np.uint16, 'int32': np.int32, 'uint32': np.uint32,
'int64': np.int64, 'uint64': np.uint64, 'float32': np.float32,
'float16': np.float16, 'float64': np.float64, 'bool': np.bool,
'string': np.str, 'complex128': np.complex128, 'int16': np.int16}
@transform_registry(transform_type="Cast",
process="general", framework="tensorflow")
class CastTFTransform(BaseTransform):
"""Convert image to given dtype.
Args:
dtype (str, default='float32'): A dtype to convert image to
Returns:
tuple of processed image and label
"""
def __init__(self, dtype='float32'):
self.tf_dtype_map = {'int16': tf.int16, 'uint8': tf.uint8, 'uint16': tf.uint16,
'uint32':tf.uint32, 'uint64': tf.uint64, 'complex64': tf.complex64,
'int32': tf.int32, 'int64':tf.int64, 'float32': tf.float32,
'float16': tf.float16, 'float64':tf.float64, 'bool': tf.bool,
'string': tf.string, 'int8': tf.int8, 'complex128': tf.complex128}
assert dtype in self.tf_dtype_map.keys(), 'Unknown dtype'
self.dtype = dtype
def __call__(self, sample):
image, label = sample
if isinstance(image, tf.Tensor):
image = tf.image.convert_image_dtype(image, dtype=self.tf_dtype_map[self.dtype])
else:
image = image.astype(np_dtype_map[self.dtype])
return (image, label)
@transform_registry(transform_type="Cast",
process="general", framework="onnxrt_qlinearops, onnxrt_integerops")
class CastONNXTransform(BaseTransform):
"""Convert image to given dtype.
Args:
dtype (str, default='float32'): A dtype to convert image to
Returns:
tuple of processed image and label
"""
def __init__(self, dtype='float32'):
assert dtype in np_dtype_map.keys(), 'Unknown dtype'
self.dtype = dtype
def __call__(self, sample):
image, label = sample
image = image.astype(np_dtype_map[self.dtype])
return (image, label)
@transform_registry(transform_type="Cast", process="general", framework="pytorch")
class CastPyTorchTransform(BaseTransform):
"""Convert image to given dtype.
Args:
dtype (str, default='float32'): A dtype to convert image to
Returns:
tuple of processed image and label
"""
def __init__(self, dtype='float32'):
dtype_map = {'int8': torch.int8, 'uint8': torch.uint8, 'complex128': torch.complex128,
'int32':torch.int32, 'int64':torch.int64, 'complex64': torch.complex64,
'bfloat16':torch.bfloat16, 'float64':torch.float64, 'bool': torch.bool,
'float16':torch.float16, 'int16':torch.int16, 'float32': torch.float32}
assert dtype in dtype_map.keys(), 'Unknown dtype'
self.dtype = dtype_map[dtype]
def __call__(self, sample):
image, label = sample
image = image.type(self.dtype)
return (image, label)
@transform_registry(transform_type="CenterCrop",
process="preprocess", framework="tensorflow")
class CenterCropTFTransform(BaseTransform):
"""Crops the given image at the center to the given size.
Args:
size (list or int): Size of the result
Returns:
tuple of processed image and label
"""
def __init__(self, size):
if isinstance(size, int):
self.size = size, size
elif isinstance(size, list):
if len(size) == 1:
self.size = size[0], size[0]
elif len(size) == 2:
self.size = size[0], size[1]
def __call__(self, sample):
image, label = sample
if isinstance(image, tf.Tensor):
if len(image.shape) == 3:
height, width = image.shape[0:2]
elif len(image.shape) == 4:
height, width = image.shape[1:3]
else:
raise ValueError("Unknown image shape")
if height < self.size[0] or width < self.size[1]:
raise ValueError("Target size shouldn't be larger than image size")
y0 = (height - self.size[0]) // 2
x0 = (width - self.size[1]) // 2
image = tf.image.crop_to_bounding_box(image, y0, x0, self.size[0], self.size[1])
else:
transform = CenterCropTransform(self.size)
image, label = transform(sample)
return (image, label)
@transform_registry(transform_type="PaddedCenterCrop", process="preprocess", \
framework="tensorflow")
class PaddedCenterCropTransform(BaseTransform):
def __init__(self, size, crop_padding=0):
if isinstance(size, int):
self.image_size = size
elif isinstance(size, list):
if len(size) == 1:
self.image_size = size[0]
elif len(size) == 2:
if size[0] != size[1]:
raise ValueError("crop height must equal crop width")
self.image_size = size[0]
self.crop_padding = crop_padding
def __call__(self, sample):
image, label = sample
h, w = image.shape[0], image.shape[1]
padded_center_crop_size = \
int((self.image_size / (self.image_size + self.crop_padding)) * min(h, w))
y0 = (h - padded_center_crop_size + 1) // 2
x0 = (w - padded_center_crop_size + 1) // 2
image = image[y0:y0 + padded_center_crop_size, x0:x0 + padded_center_crop_size, :]
return (image, label)
@transform_registry(transform_type="Resize",
process="preprocess", framework="tensorflow")
class ResizeTFTransform(BaseTransform):
"""Resize the input image to the given size.
Args:
size (list or int): Size of the result
interpolation (str, default='bilinear'):Desired interpolation type,
support 'bilinear', 'nearest', 'bicubic'
Returns:
tuple of processed image and label
"""
def __init__(self, size, interpolation='bilinear'):
if isinstance(size, int):
self.size = size, size
elif isinstance(size, list):
if len(size) == 1:
self.size = size[0], size[0]
elif len(size) == 2:
self.size = size[0], size[1]
self.interpolation = interpolation
if self.interpolation not in ['bilinear', 'nearest', 'bicubic']:
raise ValueError('Unsupported interpolation type!')
def __call__(self, sample):
image, label = sample
if isinstance(image, tf.Tensor):
image = tf.image.resize(image, self.size, method=self.interpolation)
else:
image = cv2.resize(image, self.size,
interpolation=interpolation_map[self.interpolation])
return (image, label)
@transform_registry(transform_type="Resize", process="preprocess", \
framework="pytorch")
class ResizePytorchTransform(BaseTransform):
"""Resize the input image to the given size.
Args:
size (list or int): Size of the result
interpolation (str, default='bilinear'):Desired interpolation type,
support 'bilinear', 'nearest', 'bicubic'
Returns:
tuple of processed image and label
"""
def __init__(self, size, interpolation='bilinear'):
self.size = size
if interpolation in interpolation_pytorch_map.keys():
self.interpolation = interpolation_pytorch_map[interpolation]
else:
raise ValueError("Undefined interpolation type")
def __call__(self, sample):
image, label = sample
transformer = torchvision.transforms.Resize(size=self.size,
interpolation=self.interpolation)
return (transformer(image), label)
@transform_registry(transform_type="RandomCrop",
process="preprocess", framework="tensorflow")
class RandomCropTFTransform(BaseTransform):
"""Crop the image at a random location to the given size.
Args:
size (list or tuple or int): Size of the result
Returns:
tuple of processed image and label
"""
def __init__(self, size):
if isinstance(size, int):
self.size = size, size
elif isinstance(size, list) or isinstance(size, tuple):
if len(size) == 1:
self.size = size[0], size[0]
elif len(size) == 2:
self.size = size[0], size[1]
def __call__(self, sample):
image, label = sample
if isinstance(image, tf.Tensor):
if len(image.shape) == 3:
height, width = image.shape[0:2]
elif len(image.shape) == 4:
height, width = image.shape[1:3]
if self.size[0] > height or self.size[1] > width:
raise ValueError('Crop size must be smaller than image size')
if self.size[0] == height and self.size[1] == width:
return (image, label)
height = tf.cast(height, dtype=tf.float32)
width = tf.cast(width, dtype=tf.float32)
offset_height = (height - self.size[0]) / 2
offset_width = (width - self.size[1]) / 2
offset_height = tf.cast(offset_height, dtype=tf.int32)
offset_width = tf.cast(offset_width, dtype=tf.int32)
image = tf.image.crop_to_bounding_box(image, offset_height,
offset_width, self.size[0], self.size[1])
else:
transform = RandomCropTransform(self.size)
image, label = transform(sample)
return (image, label)
@transform_registry(transform_type="RandomResizedCrop", process="preprocess", \
framework="pytorch")
class RandomResizedCropPytorchTransform(BaseTransform):
"""Crop the given image to random size and aspect ratio.
Args:
size (list or int):
Size of the result
scale (tuple or list, default=(0.08, 1.0)):
range of the area fraction of the original image that is cropped
ratio (tuple or list, default=(3. / 4., 4. / 3.)):
range of aspect ratios of the random crop
interpolation (str, default='bilinear'):
Desired interpolation type, support 'bilinear', 'nearest', 'bicubic'
Returns:
tuple of processed image and label
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
self.size = size
self.scale = scale
self.ratio = ratio
if interpolation in interpolation_pytorch_map.keys():
self.interpolation = interpolation_pytorch_map[interpolation]
else:
raise ValueError("Undefined interpolation type")
if scale[0] > scale[1] or ratio[0] > ratio[1]:
raise ValueError("Scale and ratio should be of kind (min, max)")
def __call__(self, sample):
image, label = sample
transformer = torchvision.transforms.RandomResizedCrop(size=self.size,
scale=self.scale, ratio=self.ratio, interpolation=self.interpolation)
return (transformer(image), label)
@transform_registry(transform_type="RandomResizedCrop", process="preprocess", \
framework="mxnet")
class RandomResizedCropMXNetTransform(BaseTransform):
"""Crop the given image to random size and aspect ratio.
Args:
size (list or int):
Size of the result
scale (tuple or list, default=(0.08, 1.0)):
range of the area fraction of the original image that is cropped
ratio (tuple or list, default=(3. / 4., 4. / 3.)):
range of aspect ratios of the random crop
interpolation (str, default='bilinear'):
Desired interpolation type, support 'bilinear', 'nearest', 'bicubic'
Returns:
tuple of processed image and label
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
if isinstance(size, int):
self.size = size, size
elif isinstance(size, list):
if len(size) == 1:
self.size = size[0], size[0]
elif len(size) == 2:
self.size = size[1], size[0]
self.scale = scale
self.ratio = ratio
if interpolation in interpolation_mxnet_map.keys():
self.interpolation = interpolation_mxnet_map[interpolation]
else:
raise ValueError("Undefined interpolation type")
if scale[0] > scale[1] or ratio[0] > ratio[1]:
raise ValueError("Scale and ratio should be of kind (min, max)")
def __call__(self, sample):
image, label = sample
transformer = mx.gluon.data.vision.transforms.RandomResizedCrop(size=self.size,
scale=self.scale, ratio=self.ratio, interpolation=self.interpolation)
return (transformer(image), label)
@transform_registry(transform_type="RandomResizedCrop",
process="preprocess", framework="tensorflow")
class RandomResizedCropTFTransform(BaseTransform):
"""Crop the given image to random size and aspect ratio.
Args:
size (list or int):
Size of the result
scale (tuple or list, default=(0.08, 1.0)):
range of the area fraction of the original image that is cropped
ratio (tuple or list, default=(3. / 4., 4. / 3.)):
range of aspect ratios of the random crop
interpolation (str, default='bilinear'):
Desired interpolation type, support 'bilinear', 'nearest'
Returns:
tuple of processed image and label
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(
3. / 4., 4. / 3.), interpolation='bilinear'):
if isinstance(size, int):
self.size = size, size
elif isinstance(size, list):
if len(size) == 1:
self.size = size[0], size[0]
elif len(size) == 2:
self.size = size[0], size[1]
self.scale = scale
self.ratio = ratio
self.interpolation = interpolation
if self.interpolation not in ['bilinear', 'nearest']:
raise ValueError('Unsupported interpolation type!')
if scale[0] > scale[1] or ratio[0] > ratio[1]:
raise ValueError("Scale and ratio should be of kind (min, max)")
def get_params(self, image, scale, ratio):
shape = image.shape
height = tf.cast(shape[0], dtype=tf.float32)
width = tf.cast(shape[1], dtype=tf.float32)
src_area = height * width
for _ in range(10):
target_area = np.random.uniform(scale[0], scale[1]) * src_area
log_ratio = (np.log(ratio[0]), np.log(ratio[1]))
new_ratio = np.exp(np.random.uniform(log_ratio[0], log_ratio[1]))
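# (The source is truncated at this point.) For reference, a torchvision-style
# parameter selection typically completes the loop above roughly as follows; this
# is an illustrative sketch, not the original continuation:
#
#     new_w = int(round(np.sqrt(target_area * new_ratio)))
#     new_h = int(round(np.sqrt(target_area / new_ratio)))
#     if 0 < new_w <= width and 0 < new_h <= height:
#         y0 = np.random.randint(0, int(height) - new_h + 1)
#         x0 = np.random.randint(0, int(width) - new_w + 1)
#         return y0, x0, new_h, new_w
#     # after 10 failed attempts, fall back to a central crop of the whole image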
import time
import numpy as np
class Agent(object):
"""Base class for all Reinforcement Learning agents."""
def __init__(self):
super().__init__()
# self._logger = logging.getLogger('{}.{}'.format(__name__, type(self).__name__))
def seed(self, seed=None):
"""Seed the random number generator.
Parameters
----------
seed : int, optional
Seed for random number generator.
Note
----
Base method does nothing.
"""
pass
def run(self):
"""Train the agent.
Note
----
Base method does nothing.
"""
pass
class SimpleAgent(Agent):
"""Abstract agent that implements a typical RL flow."""
def __init__(self, n_episodes, env, validation_freq=None, validation_episodes=None,
converter=None, callback=None):
"""
Parameters
----------
n_episodes : int
Number of episodes to train the agent for.
env : obj
Environment
validation_freq : int, optional
Specifies how many episodes to run before a new validation run is performed.
validation_episodes : int, optional
Number of episodes in each validation run.
converter : Converter, optional
If specified, allows pre/post-processing of states, actions, or experience.
callback : Callback, optional
If specified, called at certain points during agent training.
"""
super().__init__()
self._n_episodes = n_episodes
self._env = env
self._converter = converter
self._validation_freq = validation_freq
self._validation_episodes = validation_episodes
self._callback = callback
self._global_step = None
def run(self):
"""Train the agent by running `self._n_episodes` episodes.
Returns
-------
list
Validation rewards if `validation_freq` is specified,
training rewards otherwise.
obj
Some useful training statistics.
"""
if self._callback:
self._callback.on_before_run(n_episodes=self._n_episodes)
training_rewards = []
validation_rewards = []
self._global_step = 0
stats = []
for episode_no in range(self._n_episodes):
episode_start = time.perf_counter()
self._before_episode(episode_no)
reward, n_episode_steps = self._run_episode(episode_no)
self._after_episode(episode_no, reward)
episode_stop = time.perf_counter()
stats.append(episode_stop - episode_start)
training_rewards.append(reward)
if self._validation_freq and (episode_no + 1) % self._validation_freq == 0:
self._before_validation()
rewards = []
for _ in range(self._validation_episodes):
reward = self._validate_episode()
rewards.append(reward)
validation_rewards.append(rewards)
self._after_validation(rewards)
rewards = validation_rewards if self._validation_freq else training_rewards
stats = np.array(stats)
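# Illustrative sketch (added): how SimpleAgent is meant to be specialized. The
# trivial agent below and its constant-reward episode are assumptions for the
# example; the real hooks (_run_episode, _before_episode/_after_episode, and the
# validation hooks) are expected to be provided by concrete subclasses.
class _ConstantRewardAgent(SimpleAgent):
    def _before_episode(self, episode_no):
        pass
    def _after_episode(self, episode_no, reward):
        pass
    def _run_episode(self, episode_no):
        # One fake step with a fixed reward; a real agent would interact with self._env.
        return 1.0, 1
# agent = _ConstantRewardAgent(n_episodes=5, env=None)
# rewards, stats = agent.run()   # per the docstring: training rewards plus timing stats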
import numpy as np
import pytest
from numpy.testing import (
assert_, assert_allclose, assert_equal, assert_warns, assert_raises)
from statsmodels.tsa.arima.datasets.brockwell_davis_2002 import lake, oshorts
from statsmodels.tsa.arima.estimators.gls import gls
@pytest.mark.low_precision('Test against Example 6.6.1 in Brockwell and Davis'
' (2016)')
def test_brockwell_davis_example_661():
endog = oshorts.copy()
exog = np.ones_like(endog)
# Here we restrict the iterations to 1 and test against the values in the
# text (set tolerance=1 to suppress the warning that it didn't converge)
res, _ = gls(endog, exog, order=(0, 0, 1), max_iter=1, tolerance=1)
assert_allclose(res.exog_params, -4.745, atol=1e-3)
assert_allclose(res.ma_params, -0.818, atol=1e-3)
assert_allclose(res.sigma2, 2041, atol=1)
# Here we do not restrict the iterations and test against the values in
# the last row of Table 6.2 (note: this table does not report sigma2)
res, _ = gls(endog, exog, order=(0, 0, 1))
assert_allclose(res.exog_params, -4.780, atol=1e-3)
assert_allclose(res.ma_params, -0.848, atol=1e-3)
@pytest.mark.low_precision('Test against Example 6.6.2 in Brockwell and Davis'
' (2016)')
def test_brockwell_davis_example_662():
endog = lake.copy()
exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]
res, _ = gls(endog, exog, order=(2, 0, 0))
# Parameter values taken from Table 6.3 row 2, except for sigma2 and the
# last digit of the exog_params[0], which were given in the text
assert_allclose(res.exog_params, [10.091, -.0216], atol=1e-3)
assert_allclose(res.ar_params, [1.005, -.291], atol=1e-3)
assert_allclose(res.sigma2, .4571, atol=1e-3)
def test_integrated():
# Get the lake data
endog1 = lake.copy()
exog1 = np.c_[np.ones_like(endog1), np.arange(1, len(endog1) + 1) * 1.0]
endog2 = np.r_[0, np.cumsum(endog1)]
exog2 = np.c_[[0, 0], np.cumsum(exog1, axis=0).T].T
# Estimate without integration
p1, _ = gls(endog1, exog1, order=(1, 0, 0))
# Estimate with integration
with assert_warns(UserWarning):
p2, _ = gls(endog2, exog2, order=(1, 1, 0))
assert_allclose(p1.params, p2.params)
def test_integrated_invalid():
# Test for invalid versions of integrated model
# - include_constant=True is invalid if integration is present
endog = lake.copy()
exog = np.arange(1, len(endog) + 1) * 1.0
assert_raises(ValueError, gls, endog, exog, order=(1, 1, 0),
include_constant=True)
def test_results():
endog = lake.copy()
exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]
# Test for results output
p, res = gls(endog, exog, order=(1, 0, 0))
assert_('params' in res)
assert_('converged' in res)
assert_('differences' in res)
assert_('iterations' in res)
assert_('arma_estimator' in res)
assert_('arma_results' in res)
assert_(res.converged)
assert_(res.iterations > 0)
assert_equal(res.arma_estimator, 'innovations_mle')
assert_equal(len(res.params), res.iterations + 1)
assert_equal(len(res.differences), res.iterations + 1)
assert_equal(len(res.arma_results), res.iterations + 1)
assert_equal(res.params[-1], p)
#!/usr/bin/env python
import xml.etree.ElementTree as ET
import numpy as np
from scipy.optimize import minimize
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
data_files = ['output_6_all_points.xml', 'output_7_all_points.xml', 'output_8_all_points.xml',
'output_9_all_points.xml', 'output_10_all_points.xml']
confidence_level = 100
xyz = []
dist = []
echosounder_position = []
def optimize_position(P_0):
global xyz
error_vector = np.array([0.0, 0.0, 0.0])
for i in range(np.size(xyz,0)):
X_i = np.array(xyz[i])
point_vector = X_i - P_0
magnitude_vector = (np.linalg.norm(point_vector) - dist[i]) * point_vector / np.linalg.norm(point_vector)
error_vector = np.add(error_vector, magnitude_vector)
error_magnitude = np.linalg.norm(error_vector)
return error_magnitude / np.size(xyz,0)
def get_echosounder_position():
# Optimize echosounder position
echosounder_initial_position = np.array([0.0, 0.0, 0.0])
res = minimize(optimize_position, echosounder_initial_position, method='nelder-mead', options={'xatol': 1e-8, 'disp': True})
echosounder_position = res.x
print("\nOptimize position:\n" + str(res))
print("\nEchosounder position: " + str(echosounder_position) + "\n")
return echosounder_position
def optimize_direction(axis):
axis = axis / np.linalg.norm(axis)
points_outside = True
angle = 0.0
while points_outside:
points_outside = False
for i in xyz:
cone_dist = np.dot(i - echosounder_position, axis)
cone_radius = cone_dist * math.tan(angle)
orth_vect = np.dot(i - echosounder_position, axis) / np.dot(axis, axis)
orth_vect = orth_vect * axis + echosounder_position - i
orth_dist = np.linalg.norm(orth_vect)
if orth_dist > cone_radius:
angle = angle + 0.01
points_outside = True
return angle
def get_echosounder_direction():
# Initial echosounder direction axis
init_direction_vector = np.array([0.0, 0.0, 0.0])
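# Illustrative smoke test (added): check that optimize_position() recovers a known
# sensor location from synthetic points with exact ranges. The synthetic data below
# are assumptions for the example; xyz and dist are the module-level globals that
# optimize_position() reads.
def _example_synthetic_fit():
    global xyz, dist
    true_position = np.array([1.0, -2.0, 3.0])
    rng = np.random.default_rng(0)
    points = true_position + rng.normal(scale=5.0, size=(50, 3))
    xyz = points.tolist()
    dist = [np.linalg.norm(p - true_position) for p in points]
    # With noise-free ranges the minimiser should land very close to true_position.
    return get_echosounder_position()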
import matplotlib
matplotlib.use('MacOSX')
import matplotlib.pyplot as plt
import healpy as hp
#import astropy.io.fits as pyfits
import fitsio
import numpy as np
import plot_lib as plib
print('Healpy version', hp.__version__)
GeV = 1.60218e-10
def get_header_info(fits_map_filename):
h = fitsio.read_header(fits_map_filename, ext=1)
NSIDE = h["NSIDE"]
print ("NSIDE: %i" % NSIDE)
print ("approx. resolution: %3.1f deg" % (hp.nside2resol(NSIDE, arcmin=True) / 60))
print ("number of pixels: %i" % hp.nside2npix(NSIDE))
print ("Process: %s" % h["PROCESS"])
print ("Units: %s" % h["TUNIT1"])
return NSIDE
def get_map(fits_map_filename):
h = fitsio.read_header(fits_map_filename, ext=1)
n_entries = h["NAXIS2"]
NSIDE = h["NSIDE"]
assert(n_entries == hp.nside2npix(NSIDE))
fits = fitsio.FITS(fits_map_filename, iter_row_buffer=10000)
hmap = []
for i in range(n_entries):
flux = fits[1][i][0]
hmap.append(flux)
print ("Read map from %3.1e to %3.1e with %i pixels" % (min(hmap), max(hmap), n_entries))
print ("Mean flux: ", np.mean(hmap))
print ("Total flux: ", sum(hmap))
return np.array(hmap)
def compute_map_slope(map_nu1, map_nu2, nu1, nu2, b, l):
b_size, l_size = len(b), len(l)
slopes = np.zeros((b_size, l_size))
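# (The source is truncated at this point.) The conventional per-pixel spectral
# slope between two maps at frequencies nu1 and nu2 is
#     alpha = ln(I_nu1 / I_nu2) / ln(nu1 / nu2).
# A sketch of how the (b, l) grid could be filled, assuming healpy's ang2pix
# indexing; this is an illustration, not the original continuation:
#
#     nside = hp.get_nside(map_nu1)
#     for i, b_i in enumerate(b):
#         for j, l_j in enumerate(l):
#             ipix = hp.ang2pix(nside, np.radians(90.0 - b_i), np.radians(l_j))
#             slopes[i, j] = np.log(map_nu1[ipix] / map_nu2[ipix]) / np.log(nu1 / nu2)
#     return slopes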
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
By <NAME>
Modified from https://github.com/dong-x16/PortraitNet
"""
import torch
import cv2
import numpy as np
from PIL import Image
from data_augments import data_aug_blur, data_aug_color, data_aug_noise
from data_augments import data_aug_flip, aug_matrix
from data_augments import show_edge, Normalize_Img
class PortraitSegDatasetAug(torch.utils.data.Dataset):
""" join the x, y into a dataset """
def __init__(self, imgs, masks, aug=True,
angle_range=45,zoom=0.5,noise_scale=10.0):
"""
Args:
imgs (tensor): loaded input images
masks (tensor): corresponding segmentation masks
aug (bool): whether to apply data augmentation
"""
self.imgs = imgs
self.masks = masks
self.aug = aug
self.angle_range = angle_range
self.zoom = zoom
self.noise_scale = noise_scale
self.input_width = 64
self.input_height = 64
self.padding_color = 128
self.img_scale = 1.
self.img_mean = [128., 128., 128.] # BGR
self.img_val = [2./255., 2./255., 2./255.] # BGR
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
img = None
mask = None
bbox = None
H = None
if torch.is_tensor(idx):
idx = idx.tolist()
img = self.imgs[idx]
mask = self.masks[idx]
if self.aug:
height, width, channel = img.shape
bbox = [0, 0, width-1, height-1]
H = aug_matrix(width, height, bbox, self.input_width, self.input_height,
angle_range=(-self.angle_range, self.angle_range),
scale_range=(self.zoom, 1/self.zoom),
offset=self.input_height/4)
border_col = (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255))
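# Illustrative usage sketch (added): wrapping the dataset in a PyTorch DataLoader.
# The array shapes are assumptions for the example, and it presumes __getitem__
# ultimately returns a normalized (image, mask) pair:
#
#     imgs = np.zeros((8, 128, 128, 3), dtype=np.uint8)
#     masks = np.zeros((8, 128, 128), dtype=np.uint8)
#     train_set = PortraitSegDatasetAug(imgs, masks, aug=False)
#     loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True)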
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Class for reading and writing uvfits files."""
import os
import copy
import warnings
import numpy as np
from astropy import constants as const
from astropy.time import Time
from astropy.io import fits
from .uvdata import UVData
from .. import utils as uvutils
__all__ = ["UVFITS"]
class UVFITS(UVData):
"""
Defines a uvfits-specific subclass of UVData for reading and writing uvfits.
This class should not be interacted with directly, instead use the read_uvfits
and write_uvfits methods on the UVData class.
Attributes
----------
uvfits_required_extra : list of str
Names of optional UVParameters that are required for uvfits.
"""
uvfits_required_extra = [
"antenna_positions",
"gst0",
"rdate",
"earth_omega",
"dut1",
"timesys",
]
def _get_parameter_data(
self, vis_hdu, run_check_acceptability, background_lsts=True,
):
"""
Read just the random parameters portion of the uvfits file ("metadata").
Separated from full read so that header, metadata and data can be read
independently.
"""
# astropy.io fits reader scales date according to relevant PZERO (?)
# uvfits standard is to have 2 DATE parameters, both floats:
# DATE (full day) and _DATE (fractional day)
# cotter uvfits files have one DATE that is a double
# using data.par('date') is general -- it will add them together if there are 2
self.time_array = vis_hdu.data.par("date")
self.Ntimes = len(np.unique(self.time_array))
# check if lst array is saved. It's not a standard metadata item in uvfits,
# but if the file was written with pyuvdata it may be present
# (depending on pyuvdata version)
proc = None
if "LST" in vis_hdu.data.parnames:
# angles in uvfits files are stored in degrees, so convert to radians
self.lst_array = np.deg2rad(vis_hdu.data.par("lst"))
if run_check_acceptability:
(
latitude,
longitude,
altitude,
) = self.telescope_location_lat_lon_alt_degrees
lst_array = uvutils.get_lst_for_time(
self.time_array, latitude, longitude, altitude
)
if not np.all(
np.isclose(
self.lst_array,
lst_array,
rtol=self._lst_array.tols[0],
atol=self._lst_array.tols[1],
)
):
warnings.warn(
"LST values stored in this file are not "
"self-consistent with time_array and telescope "
"location. Consider recomputing with "
"utils.get_lst_for_time."
)
else:
proc = self.set_lsts_from_time_array(background=background_lsts)
# if antenna arrays are present, use them. otherwise use baseline array
if "ANTENNA1" in vis_hdu.data.parnames and "ANTENNA2" in vis_hdu.data.parnames:
# Note: uvfits antennas are 1 indexed,
# need to subtract one to get to 0-indexed
self.ant_1_array = np.int32(vis_hdu.data.par("ANTENNA1")) - 1
self.ant_2_array = np.int32(vis_hdu.data.par("ANTENNA2")) - 1
subarray = np.int32(vis_hdu.data.par("SUBARRAY")) - 1
# error on files with multiple subarrays
if len(set(subarray)) > 1:
raise ValueError(
"This file appears to have multiple subarray "
"values; only files with one subarray are "
"supported."
)
else:
# cannot set this to be the baseline array because it uses the
# 256 convention, not our 2048 convention
bl_input_array = np.int64(vis_hdu.data.par("BASELINE"))
# get antenna arrays based on uvfits baseline array
self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(
bl_input_array
)
# check for multi source files. NOW SUPPORTED, W00T!
if "SOURCE" in vis_hdu.data.parnames:
# Preserve the source info just in case the AIPS SU table is missing, and
# we need to revert things back.
self._set_multi_phase_center(preserve_phase_center_info=True)
source = vis_hdu.data.par("SOURCE")
self.phase_center_id_array = source.astype(int)
# get self.baseline_array using our convention
self.baseline_array = self.antnums_to_baseline(
self.ant_1_array, self.ant_2_array
)
self.Nbls = len(np.unique(self.baseline_array))
# initialize internal variables based on the antenna lists
self.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)
# read baseline vectors in units of seconds, return in meters
# FITS uvw direction convention is opposite ours and Miriad's.
# So conjugate the visibilities and flip the uvws:
self.uvw_array = (-1) * (
np.array(
np.stack(
(
vis_hdu.data.par("UU"),
vis_hdu.data.par("VV"),
vis_hdu.data.par("WW"),
)
)
)
* const.c.to("m/s").value
).T
if "INTTIM" in vis_hdu.data.parnames:
self.integration_time = np.asarray(
vis_hdu.data.par("INTTIM"), dtype=np.float64
)
else:
if self.Ntimes > 1:
# assume that all integration times in the file are the same
int_time = self._calc_single_integration_time()
self.integration_time = (
np.ones_like(self.time_array, dtype=np.float64) * int_time
)
else:
warnings.warn(
"The integration time is not specified and only one time is "
"present so it cannot be calculated from the difference between "
"integration times. Setting to None which will cause the check to "
"error. Set `run_check` to False to read in the file without "
"checking. Then set the integration_time (to an array of length "
"Nblts) directly on the object to allow futher processing."
)
if proc is not None:
proc.join()
def _get_data(
self,
vis_hdu,
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
read_metadata,
keep_all_metadata,
run_check,
check_extra,
run_check_acceptability,
strict_uvw_antpos_check,
fix_old_proj,
fix_use_ant_pos,
):
"""
Read just the visibility and flag data of the uvfits file.
Separated from full read so header and metadata can be read without data.
"""
# figure out what data to read in
blt_inds, freq_inds, pol_inds, history_update_string = self._select_preprocess(
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
)
if blt_inds is not None:
blt_frac = len(blt_inds) / float(self.Nblts)
else:
blt_frac = 1
if freq_inds is not None:
freq_frac = len(freq_inds) * float(self.Nspws) / float(self.Nfreqs)
else:
freq_frac = 1
if pol_inds is not None:
pol_frac = len(pol_inds) / float(self.Npols)
else:
pol_frac = 1
min_frac = np.min([blt_frac, freq_frac, pol_frac])
if min_frac == 1:
# no select, read in all the data
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :]
else:
# do select operations on everything except data_array, flag_array
# and nsample_array
self._select_metadata(
blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata
)
# just read in the right portions of the data and flag arrays
if blt_frac == min_frac:
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :, :]
if freq_frac < 1:
raw_data_array = raw_data_array[:, :, freq_inds, :, :]
if pol_frac < 1:
raw_data_array = raw_data_array[:, :, :, pol_inds, :]
elif freq_frac == min_frac:
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[:, :, :, :, freq_inds, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[:, :, :, freq_inds, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :, :]
if blt_frac < 1:
raw_data_array = raw_data_array[blt_inds, :, :, :, :]
if pol_frac < 1:
raw_data_array = raw_data_array[:, :, :, pol_inds, :]
else:
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[:, :, :, :, :, pol_inds, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[:, :, :, :, pol_inds, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :, :]
if blt_frac < 1:
raw_data_array = raw_data_array[blt_inds, :, :, :, :]
if freq_frac < 1:
raw_data_array = raw_data_array[:, :, freq_inds, :, :]
assert len(raw_data_array.shape) == 5
# Reshape the data array to be the right size if we are working w/ multiple
# spectral windows to be 'flex_spw' compliant
if self.Nspws > 1:
raw_data_array = np.reshape(
raw_data_array,
(self.Nblts, 1, self.Nfreqs, self.Npols, raw_data_array.shape[4]),
)
# FITS uvw direction convention is opposite ours and Miriad's.
# So conjugate the visibilities and flip the uvws:
self.data_array = (
raw_data_array[:, :, :, :, 0] - 1j * raw_data_array[:, :, :, :, 1]
)
self.flag_array = raw_data_array[:, :, :, :, 2] <= 0
self.nsample_array = np.abs(raw_data_array[:, :, :, :, 2])
if fix_old_proj:
self.fix_phase(use_ant_pos=fix_use_ant_pos)
# check if object has all required UVParameters set
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
allow_flip_conj=True,
)
def read_uvfits(
self,
filename,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
keep_all_metadata=True,
read_data=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
fix_old_proj=False,
fix_use_ant_pos=True,
):
"""
Read in header, metadata and data from a uvfits file.
Supports reading only selected portions of the data.
Parameters
----------
filename : str
The uvfits file to read from.
antenna_nums : array_like of int, optional
The antennas numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antennas names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array.
time_range : array_like of float, optional
The time range in Julian Date to keep in the object, must be
length 2. Some of the times in the object should fall between the
first and last elements. Cannot be used with `times`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int, optional
The polarizations numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the visibility, nsample and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
Raises
------
IOError
If filename doesn't exist.
ValueError
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data have multi spw with different channel widths.
If the metadata are not internally consistent or missing.
"""
# update filename attribute
basename = os.path.basename(filename)
self.filename = [basename]
self._filename.form = (1,)
with fits.open(filename, memmap=True) as hdu_list:
vis_hdu = hdu_list[0] # assumes the visibilities are in the primary hdu
vis_hdr = vis_hdu.header.copy()
hdunames = uvutils._fits_indexhdus(hdu_list) # find the rest of the tables
# First get everything we can out of the header.
self._set_phased()
# check if we have an spw dimension
if vis_hdr["NAXIS"] == 7:
self.Nspws = vis_hdr.pop("NAXIS5")
self.spw_array = (
uvutils._fits_gethduaxis(vis_hdu, 5).astype(np.int64) - 1
)
# the axis number for phase center depends on if the spw exists
self.phase_center_ra_degrees = float(vis_hdr.pop("CRVAL6"))
self.phase_center_dec_degrees = float(vis_hdr.pop("CRVAL7"))
else:
self.Nspws = 1
self.spw_array = np.array([np.int64(0)])
# the axis number for phase center depends on if the spw exists
self.phase_center_ra_degrees = float(vis_hdr.pop("CRVAL5"))
self.phase_center_dec_degrees = float(vis_hdr.pop("CRVAL6"))
# get shapes
self.Npols = vis_hdr.pop("NAXIS3")
self.Nblts = vis_hdr.pop("GCOUNT")
if self.Nspws > 1:
# If this is multi-spw, use the 'flexible' spectral window setup
self._set_flex_spw()
uvfits_nchan = vis_hdr.pop("NAXIS4")
self.Nfreqs = uvfits_nchan * self.Nspws
self.flex_spw_id_array = np.transpose(
np.tile(np.arange(self.Nspws), (uvfits_nchan, 1))
).flatten()
fq_hdu = hdu_list[hdunames["AIPS FQ"]]
assert self.Nspws == fq_hdu.header["NO_IF"]
# TODO: This is fine for now, although I (karto) think that this
# is relative to the ref_freq, which can be specified as part of
# the AIPS SU table.
# Get rest freq value
ref_freq = uvutils._fits_gethduaxis(vis_hdu, 4)[0]
self.channel_width = np.transpose(
np.tile(abs(fq_hdu.data["CH WIDTH"]), (uvfits_nchan, 1))
).flatten()
self.freq_array = np.reshape(
np.transpose(
(
ref_freq
+ fq_hdu.data["IF FREQ"]
+ np.outer(np.arange(uvfits_nchan), fq_hdu.data["CH WIDTH"])
)
),
(1, -1),
)
else:
self.Nfreqs = vis_hdr.pop("NAXIS4")
self.freq_array = uvutils._fits_gethduaxis(vis_hdu, 4)
# TODO: Spw axis to be collapsed in future release
self.freq_array.shape = (1,) + self.freq_array.shape
self.channel_width = vis_hdr.pop("CDELT4")
self.polarization_array = np.int32(uvutils._fits_gethduaxis(vis_hdu, 3))
# other info -- not required but frequently used
self.object_name = vis_hdr.pop("OBJECT", None)
self.telescope_name = vis_hdr.pop("TELESCOP", None)
self.instrument = vis_hdr.pop("INSTRUME", None)
latitude_degrees = vis_hdr.pop("LAT", None)
longitude_degrees = vis_hdr.pop("LON", None)
altitude = vis_hdr.pop("ALT", None)
self.x_orientation = vis_hdr.pop("XORIENT", None)
blt_order_str = vis_hdr.pop("BLTORDER", None)
if blt_order_str is not None:
self.blt_order = tuple(blt_order_str.split(", "))
if self.blt_order == ("bda",):
self._blt_order.form = (1,)
self.history = str(vis_hdr.get("HISTORY", ""))
if not uvutils._check_history_version(
self.history, self.pyuvdata_version_str
):
self.history += self.pyuvdata_version_str
self.vis_units = vis_hdr.pop("BUNIT", "uncalib")
# Added here as a fix since some previous versions of UVData allowed for
# all caps versions of UNCALIB.
if self.vis_units == "UNCALIB":
self.vis_units = "uncalib"
self.phase_center_epoch = vis_hdr.pop("EPOCH", None)
# PHSFRAME is not a standard UVFITS keyword, but was used by older
# versions of pyuvdata. To ensure backwards compatibility, we look
# for it first to determine the coordinate frame for the data
self.phase_center_frame = vis_hdr.pop("PHSFRAME", None)
# If we don't find the special keyword PHSFRAME, try for the more
# FITS-standard RADESYS
if self.phase_center_frame is None:
self.phase_center_frame = vis_hdr.pop("RADESYS", None)
# If we still don't find anything, try the two 'special' variant names
# for the coordinate frame that seem to have been documented
if self.phase_center_frame is None:
self.phase_center_frame = vis_hdr.pop("RADESYSA", None)
if self.phase_center_frame is None:
self.phase_center_frame = vis_hdr.pop("RADESYSa", None)
# If we _still_ can't find anything, take a guess based on the value
# listed in the EPOCH. The behavior listed here is based off of the
# AIPS task REGRD (http://www.aips.nrao.edu/cgi-bin/ZXHLP2.PL?REGRD)
if self.phase_center_frame is None:
if self.phase_center_epoch is None:
self.phase_center_frame = "icrs"
else:
frame = "fk4" if (self.phase_center_epoch == 1950.0) else "fk5"
self.phase_center_frame = frame
self.extra_keywords = uvutils._get_fits_extra_keywords(
vis_hdr, keywords_to_skip=["DATE-OBS"]
)
# Next read the antenna table
ant_hdu = hdu_list[hdunames["AIPS AN"]]
# stuff in the header
if self.telescope_name is None:
self.telescope_name = ant_hdu.header["ARRNAM"]
self.gst0 = ant_hdu.header["GSTIA0"]
self.rdate = ant_hdu.header["RDATE"]
self.earth_omega = ant_hdu.header["DEGPDY"]
self.dut1 = ant_hdu.header["UT1UTC"]
if "TIMESYS" in ant_hdu.header.keys():
self.timesys = ant_hdu.header["TIMESYS"]
else:
# CASA misspells this one
self.timesys = ant_hdu.header["TIMSYS"]
if "FRAME" in ant_hdu.header.keys():
xyz_telescope_frame = ant_hdu.header["FRAME"]
else:
warnings.warn(
"Required Antenna keyword 'FRAME' not set; "
"Assuming frame is 'ITRF'."
)
xyz_telescope_frame = "ITRF"
# get telescope location and antenna positions.
# VLA incorrectly sets ARRAYX/ARRAYY/ARRAYZ to 0, and puts array center
# in the antenna positions themselves
if (
np.isclose(ant_hdu.header["ARRAYX"], 0)
and np.isclose(ant_hdu.header["ARRAYY"], 0)
and np.isclose(ant_hdu.header["ARRAYZ"], 0)
):
x_telescope = np.mean(ant_hdu.data["STABXYZ"][:, 0])
y_telescope = np.mean(ant_hdu.data["STABXYZ"][:, 1])
z_telescope = np.mean(ant_hdu.data["STABXYZ"][:, 2])
self.antenna_positions = ant_hdu.data.field("STABXYZ") - np.array(
[x_telescope, y_telescope, z_telescope]
)
else:
x_telescope = ant_hdu.header["ARRAYX"]
y_telescope = ant_hdu.header["ARRAYY"]
z_telescope = ant_hdu.header["ARRAYZ"]
# AIPS memo #117 says that antenna_positions should be relative to
# the array center, but in a rotated ECEF frame so that the x-axis
# goes through the local meridian.
rot_ecef_positions = ant_hdu.data.field("STABXYZ")
latitude, longitude, altitude = uvutils.LatLonAlt_from_XYZ(
np.array([x_telescope, y_telescope, z_telescope]),
check_acceptability=run_check_acceptability,
)
self.antenna_positions = uvutils.ECEF_from_rotECEF(
rot_ecef_positions, longitude
)
if xyz_telescope_frame == "ITRF":
self.telescope_location = np.array(
[x_telescope, y_telescope, z_telescope]
)
else:
if (
latitude_degrees is not None
and longitude_degrees is not None
and altitude is not None
):
self.telescope_location_lat_lon_alt_degrees = (
latitude_degrees,
longitude_degrees,
altitude,
)
# stuff in columns
ant_names = ant_hdu.data.field("ANNAME").tolist()
self.antenna_names = []
for ant_ind, name in enumerate(ant_names):
# Sometimes CASA writes antnames as bytes not strings.
# If the ant name is shorter than 8 characters, the trailing
# characters may be non-ascii.
# This is technically a FITS violation as FITS requires ascii.
# So we just ignore any non-ascii bytes in the decode.
if isinstance(name, bytes):
ant_name_str = str(name.decode("utf-8", "ignore"))
else:
ant_name_str = name
# remove non-printing ascii characters and exclamation points
ant_name_str = (
ant_name_str.replace("\x00", "")
.replace("\x07", "")
.replace("!", "")
)
self.antenna_names.append(ant_name_str)
# subtract one to get to 0-indexed values rather than 1-indexed values
self.antenna_numbers = ant_hdu.data.field("NOSTA") - 1
self.Nants_telescope = len(self.antenna_numbers)
if "DIAMETER" in ant_hdu.columns.names:
self.antenna_diameters = ant_hdu.data.field("DIAMETER")
try:
self.set_telescope_params()
except ValueError as ve:
warnings.warn(str(ve))
# Now read in the random parameter info
self._get_parameter_data(
vis_hdu, run_check_acceptability, background_lsts=background_lsts,
)
# If we find the source attribute in the FITS random parameter list,
# the multi_phase_center attribute will be set to True, and we should also
# expect that there must be an AIPS SU table.
if self.multi_phase_center and "AIPS SU" not in hdunames.keys():
warnings.warn(
"UVFITS file is missing AIPS SU table, which is required when "
"SOURCE is one of the `random paramters` in the main binary "
"table. Bypassing for now, but note that this file _may_ not "
"work correctly in UVFITS-based programs (e.g., AIPS, CASA)."
)
name = list(self.phase_center_catalog.keys())[0]
self.phase_center_ra = self.phase_center_catalog[name]["cat_lon"]
self.phase_center_dec = self.phase_center_catalog[name]["cat_lat"]
self.phase_center_frame = self.phase_center_catalog[name]["cat_frame"]
self.phase_center_epoch = self.phase_center_catalog[name]["cat_epoch"]
self.multi_phase_center = False
self._phase_center_id_array.required = False
self._Nphase.required = False
self._phase_center_catalog.required = False
self.object_name = name
self.Nphase = None
self.phase_center_catalog = None
self.phase_center_id_array = None
elif self.multi_phase_center:
su_hdu = hdu_list[hdunames["AIPS SU"]]
# We should have as many entries in the AIPS SU header as we have
# unique entries in the SOURCES random parameter (checked in the call
# to get_parameter_data above)
if len(su_hdu.data) != len(np.unique(self.phase_center_id_array)):
raise RuntimeError(
"The UVFITS file has a malformed AIPS SU table - number of "
"sources do not match the number of unique source IDs in the "
"primary data header."
) # pragma: no cover
# Reset the catalog, since it has some dummy information stored within
# it (that was pulled off the primary table)
self._remove_phase_center(list(self.phase_center_catalog.keys())[0])
# Set up these arrays so we can assign values to them
self.phase_center_app_ra = np.zeros(self.Nblts)
self.phase_center_app_dec = np.zeros(self.Nblts)
self.phase_center_app_pa = np.zeros(self.Nblts)
# Alright, we are off to the races!
for idx in range(len(su_hdu.data)):
# Grab the indv source entry
sou_info = su_hdu.data[idx]
sou_id = sou_info["ID. NO."]
sou_name = sou_info["SOURCE"]
sou_ra = sou_info["RAEPO"] * (np.pi / 180.0)
sou_dec = sou_info["DECEPO"] * (np.pi / 180.0)
sou_epoch = sou_info["EPOCH"]
sou_frame = "fk5"
self._add_phase_center(
sou_name,
cat_id=sou_id,
cat_type="sidereal",
cat_lon=sou_ra,
cat_lat=sou_dec,
cat_frame=sou_frame,
cat_epoch=sou_epoch,
info_source="uvfits file",
)
# Calculate the apparent coordinate values
self._set_app_coords_helper()
if not read_data:
# don't read in the data. This means the object is a metadata
# only object but that may not matter for many purposes.
return
# Now read in the data
self._get_data(
vis_hdu,
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
False,
keep_all_metadata,
run_check,
check_extra,
run_check_acceptability,
strict_uvw_antpos_check,
fix_old_proj,
fix_use_ant_pos,
)
def write_uvfits(
self,
filename,
spoof_nonessential=False,
write_lst=True,
force_phase=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write the data to a uvfits file.
Parameters
----------
filename : str
The uvfits file to write to.
spoof_nonessential : bool
Option to spoof the values of optional UVParameters that are not set
but are required for uvfits files.
write_lst : bool
Option to write the LSTs to the metadata (random group parameters).
force_phase : bool
Option to automatically phase drift scan data to zenith of the first
timestamp.
run_check : bool
Option to check for the existence and proper shapes of parameters
before writing the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
The `phase_type` of the object is "drift" and the `force_phase`
keyword is not set.
If the frequencies are not evenly spaced or are separated by more
than their channel width.
The polarization values are not evenly spaced.
Any of ['antenna_positions', 'gst0', 'rdate', 'earth_omega', 'dut1',
'timesys'] are not set on the object and `spoof_nonessential` is False.
If the `timesys` parameter is not set to "UTC".
TypeError
If any entry in extra_keywords is not a single string or number.
"""
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
check_freq_spacing=True,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if self.phase_type == "phased":
pass
elif self.phase_type == "drift":
if force_phase:
print(
"The data are in drift mode and do not have a "
"defined phase center. Phasing to zenith of the first "
"timestamp."
)
phase_time = Time(self.time_array[0], format="jd")
self.phase_to_time(phase_time)
else:
raise ValueError(
"The data are in drift mode. "
"Set force_phase to true to phase the data "
"to zenith of the first timestamp before "
"writing a uvfits file."
)
if self.flex_spw:
# If we have a 'flexible' spectral window, we will need to evaluate the
# frequency axis slightly differently.
if self.future_array_shapes:
freq_array_use = self.freq_array
else:
freq_array_use = self.freq_array[0, :]
nchan_list = []
start_freq_array = []
delta_freq_array = []
for idx in self.spw_array:
chan_mask = self.flex_spw_id_array == idx
nchan_list += [np.sum(chan_mask)]
start_freq_array += [freq_array_use[chan_mask][0]]
# Need the array direction here since channel_width is always supposed
# to be > 0, but channels can be in descending freq order
freq_dir = np.sign(np.median(np.diff(freq_array_use[chan_mask])))
delta_freq_array += [
np.median(self.channel_width[chan_mask]) * freq_dir
]
start_freq_array = np.reshape(np.array(start_freq_array), (1, -1)).astype(
np.float64
)
delta_freq_array = np.reshape(np.array(delta_freq_array), (1, -1)).astype(
np.float64
)
# We've constructed a couple of lists with relevant values, now time to
# check them to make sure that the data will write correctly
# Make sure that all the windows are of the same size
if len(np.unique(nchan_list)) != 1:
raise IndexError(
"UVFITS format cannot handle spectral windows of different sizes!"
)
# Make sure freq values are greater than zero. Note that I think _technically
# one could write negative frequencies into the dataset, but I am pretty
# sure that reduction packages may balk hard.
if np.any(start_freq_array <= 0):
raise ValueError("Frequency values must be > 0 for UVFITS!")
# Make sure the delta values are non-zero
if np.any(delta_freq_array == 0):
raise ValueError("Something is wrong, frequency values not unique!")
# If we passed all the above checks, then it's time to fill some extra
# array values. Note that 'ref_freq' is something of a placeholder for
# other exciting things...
ref_freq = start_freq_array[0, 0]
else:
if self.future_array_shapes:
ref_freq = self.freq_array[0]
# we've already run the check_freq_spacing, so channel widths are the
# same to our tolerances
delta_freq_array = np.array([[np.median(self.channel_width)]]).astype(
np.float64
)
else:
ref_freq = self.freq_array[0, 0]
delta_freq_array = np.array([[self.channel_width]]).astype(np.float64)
if self.Npols > 1:
pol_spacing = np.diff(self.polarization_array)
pol_indexing = np.argsort(np.abs(self.polarization_array))
polarization_array = self.polarization_array[pol_indexing]
pol_spacing = np.diff(polarization_array)
if np.min(pol_spacing) < np.max(pol_spacing):
raise ValueError(
"The polarization values are not evenly spaced (probably "
"because of a select operation). The uvfits format "
"does not support unevenly spaced polarizations."
)
pol_spacing = pol_spacing[0]
else:
pol_indexing = np.asarray([0])
polarization_array = self.polarization_array
pol_spacing = 1
for p in self.extra():
param = getattr(self, p)
if param.name in self.uvfits_required_extra:
if param.value is None:
if spoof_nonessential:
param.apply_spoof()
setattr(self, p, param)
else:
raise ValueError(
"Required attribute {attribute} "
"for uvfits not defined. Define or "
"set spoof_nonessential to True to "
"spoof this attribute.".format(attribute=p)
)
# check for unflagged data with nsample = 0. Warn if any found
wh_nsample0 = np.where(self.nsample_array == 0)
if np.any(~self.flag_array[wh_nsample0]):
warnings.warn(
"Some unflagged data has nsample = 0. Flags and "
"nsamples are combined in uvfits files such that "
"these data will appear to be flagged."
)
uvfits_data_shape = (
self.Nblts,
1,
1,
self.Nspws,
self.Nfreqs // self.Nspws,
self.Npols,
1,
)
# Reshape the arrays so that they match the uvfits conventions
# FITS uvw direction convention is opposite ours and Miriad's.
# So conjugate the visibilities and flip the uvws:
data_array = np.reshape(np.conj(self.data_array), uvfits_data_shape)
weights_array = np.reshape(
self.nsample_array * np.where(self.flag_array, -1, 1), uvfits_data_shape,
)
data_array = data_array[:, :, :, :, :, pol_indexing, :]
weights_array = weights_array[:, :, :, :, :, pol_indexing, :]
uvfits_array_data = np.concatenate(
[data_array.real, data_array.imag, weights_array], axis=6
)
# FITS uvw direction convention is opposite ours and Miriad's.
# So conjugate the visibilities and flip the uvws:
uvw_array_sec = -1 * self.uvw_array / const.c.to("m/s").value
# uvfits convention is that there are two float32 time_arrays and the
# float64 sum of them + relevant PZERO = actual JD
# a common practice is to set the PZERO to the JD at midnight of the first time
jd_midnight = np.floor(self.time_array[0] - 0.5) + 0.5
time_array1 = np.float32(self.time_array - jd_midnight)
time_array2 = np.float32(
self.time_array - jd_midnight - np.float64(time_array1)
)
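# Added note: a single float32 offset from jd_midnight carries only ~7 significant
# digits (millisecond-level precision over a fractional day), so the float64
# remainder is stored in a second float32 (time_array2). On read, PZERO4 + DATE1 +
# DATE2 summed in float64 recovers the original JD to well below a microsecond,
# following the convention described in the comment above.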
int_time_array = self.integration_time
baselines_use = self.antnums_to_baseline(
self.ant_1_array, self.ant_2_array, attempt256=True
)
# Set up dictionaries for populating hdu
# Note that uvfits antenna arrays are 1-indexed so we add 1
# to our 0-indexed arrays
group_parameter_dict = {
"UU ": uvw_array_sec[:, 0],
"VV ": uvw_array_sec[:, 1],
"WW ": uvw_array_sec[:, 2],
"DATE ": time_array1,
"DATE2 ": time_array2,
"BASELINE": baselines_use,
"SOURCE ": None,
"FREQSEL ": np.ones_like(self.time_array, dtype=np.float32),
"ANTENNA1": self.ant_1_array + 1,
"ANTENNA2": self.ant_2_array + 1,
"SUBARRAY": np.ones_like(self.ant_1_array),
"INTTIM ": int_time_array,
}
if self.multi_phase_center:
id_offset = np.any(
[
temp_dict["cat_id"] == 0
for temp_dict in self.phase_center_catalog.values()
]
)
group_parameter_dict["SOURCE "] = self.phase_center_id_array + id_offset
pscal_dict = {
"UU ": 1.0,
"VV ": 1.0,
"WW ": 1.0,
"DATE ": 1.0,
"DATE2 ": 1.0,
"BASELINE": 1.0,
"SOURCE ": 1.0,
"FREQSEL ": 1.0,
"ANTENNA1": 1.0,
"ANTENNA2": 1.0,
"SUBARRAY": 1.0,
"INTTIM ": 1.0,
}
pzero_dict = {
"UU ": 0.0,
"VV ": 0.0,
"WW ": 0.0,
"DATE ": jd_midnight,
"DATE2 ": 0.0,
"BASELINE": 0.0,
"SOURCE ": 0.0,
"FREQSEL ": 0.0,
"ANTENNA1": 0.0,
"ANTENNA2": 0.0,
"SUBARRAY": 0.0,
"INTTIM ": 0.0,
}
if write_lst:
# lst is a non-standard entry (it's not in the AIPS memo)
# but storing it can be useful (e.g. can avoid recalculating it on read)
# need to store it in 2 parts to get enough accuracy
# angles in uvfits files are stored in degrees, so first convert to degrees
lst_array_deg = np.rad2deg(self.lst_array)
lst_array_1 = np.float32(lst_array_deg)
lst_array_2 = np.float32(lst_array_deg - np.float64(lst_array_1))
group_parameter_dict["LST "] = lst_array_1
pscal_dict["LST "] = 1.0
pzero_dict["LST "] = 0.0
# list contains arrays of [u,v,w,date,baseline];
# each array has shape (Nblts)
parnames_use = ["UU ", "VV ", "WW ", "DATE ", "DATE2 "]
if np.max(self.ant_1_array) < 255 and np.max(self.ant_2_array) < 255:
# if the number of antennas is less than 256 then include both the
# baseline array and the antenna arrays in the group parameters.
# Otherwise just use the antenna arrays
parnames_use.append("BASELINE")
if self.multi_phase_center:
parnames_use.append("SOURCE ")
parnames_use += ["ANTENNA1", "ANTENNA2", "SUBARRAY", "INTTIM "]
if write_lst:
parnames_use.append("LST ")
group_parameter_list = [
group_parameter_dict[parname] for parname in parnames_use
]
if write_lst:
# add second LST array part
parnames_use.append("LST ")
group_parameter_list.append(lst_array_2)
parnames_use_datefix = copy.deepcopy(parnames_use)
parnames_use_datefix[parnames_use_datefix.index("DATE2 ")] = "DATE "
hdu = fits.GroupData(
uvfits_array_data,
parnames=parnames_use_datefix,
pardata=group_parameter_list,
bitpix=-32,
)
hdu = fits.GroupsHDU(hdu)
for i, key in enumerate(parnames_use):
hdu.header["PSCAL" + str(i + 1) + " "] = pscal_dict[key]
hdu.header["PZERO" + str(i + 1) + " "] = pzero_dict[key]
# ISO string of first time in self.time_array
hdu.header["DATE-OBS"] = Time(self.time_array[0], scale="utc", format="jd").isot
hdu.header["CTYPE2 "] = "COMPLEX "
hdu.header["CRVAL2 "] = 1.0
hdu.header["CRPIX2 "] = 1.0
hdu.header["CDELT2 "] = 1.0
# Note: This axis is called STOKES to comply with the AIPS memo 117
# However, this is confusing because it is NOT a true Stokes axis,
# it is really the polarization axis.
hdu.header["CTYPE3 "] = "STOKES "
hdu.header["CRVAL3 "] = float(polarization_array[0])
hdu.header["CRPIX3 "] = 1.0
hdu.header["CDELT3 "] = float(pol_spacing)
hdu.header["CTYPE4 "] = "FREQ "
hdu.header["CRVAL4 "] = ref_freq
hdu.header["CRPIX4 "] = 1.0
hdu.header["CDELT4 "] = delta_freq_array[0, 0]
hdu.header["CTYPE5 "] = "IF "
hdu.header["CRVAL5 "] = 1.0
hdu.header["CRPIX5 "] = 1.0
hdu.header["CDELT5 "] = 1.0
hdu.header["CTYPE6 "] = "RA"
hdu.header["CRVAL6 "] = self.phase_center_ra_degrees
hdu.header["CTYPE7 "] = "DEC"
hdu.header["CRVAL7 "] = self.phase_center_dec_degrees
hdu.header["BUNIT "] = self.vis_units
hdu.header["BSCALE "] = 1.0
hdu.header["BZERO "] = 0.0
name = "MULTI" if self.multi_phase_center else self.object_name
hdu.header["OBJECT "] = name
hdu.header["TELESCOP"] = self.telescope_name
hdu.header["LAT "] = self.telescope_location_lat_lon_alt_degrees[0]
hdu.header["LON "] = self.telescope_location_lat_lon_alt_degrees[1]
hdu.header["ALT "] = self.telescope_location_lat_lon_alt[2]
hdu.header["INSTRUME"] = self.instrument
if self.phase_center_epoch is not None:
hdu.header["EPOCH "] = float(self.phase_center_epoch)
# TODO: This is a keyword that should at some point get added for velocity
# reference stuff, although for right now pyuvdata doesn't do any sort of
# handling of this, so stub this out for now.
# hdu.header["SPECSYS "] = "TOPOCENT"
if self.phase_center_frame is not None:
# Previous versions of pyuvdata wrote this header as PHSFRAME
hdu.header["RADESYS"] = self.phase_center_frame
if self.x_orientation is not None:
hdu.header["XORIENT"] = self.x_orientation
if self.blt_order is not None:
blt_order_str = ", ".join(self.blt_order)
hdu.header["BLTORDER"] = blt_order_str
for line in self.history.splitlines():
hdu.header.add_history(line)
# end standard keywords; begin user-defined keywords
for key, value in self.extra_keywords.items():
# header keywords have to be 8 characters or less
if len(str(key)) > 8:
warnings.warn(
"key {key} in extra_keywords is longer than 8 "
"characters. It will be truncated to 8 as required "
"by the uvfits file format.".format(key=key)
)
keyword = key[:8].upper()
if isinstance(value, (dict, list, np.ndarray)):
raise TypeError(
"Extra keyword {keyword} is of {keytype}. "
"Only strings and numbers are "
"supported in uvfits.".format(keyword=key, keytype=type(value))
)
if keyword == "COMMENT":
for line in value.splitlines():
hdu.header.add_comment(line)
else:
hdu.header[keyword] = value
# ADD the ANTENNA table
staxof = np.zeros(self.Nants_telescope)
# 0 specifies alt-az, 6 would specify a phased array
mntsta = np.zeros(self.Nants_telescope)
# beware, X can mean just about anything
poltya = np.full((self.Nants_telescope), "X", dtype=np.object_)
polaa = [90.0] + np.zeros(self.Nants_telescope)
poltyb = np.full((self.Nants_telescope), "Y", dtype=np.object_)
polab = [0.0] + np.zeros(self.Nants_telescope)
col1 = fits.Column(name="ANNAME", format="8A", array=self.antenna_names)
# AIPS memo #117 says that antenna_positions should be relative to
# the array center, but in a rotated ECEF frame so that the x-axis
# goes through the local meridian.
longitude = self.telescope_location_lat_lon_alt[1]
rot_ecef_positions = uvutils.rotECEF_from_ECEF(
self.antenna_positions, longitude
)
col2 = fits.Column(name="STABXYZ", format="3D", array=rot_ecef_positions)
# col3 = fits.Column(name="ORBPARAM", format="0D", array=Norb)
# convert to 1-indexed from 0-indexed indices
col4 = fits.Column(name="NOSTA", format="1J", array=self.antenna_numbers + 1)
col5 = fits.Column(name="MNTSTA", format="1J", array=mntsta)
col6 = fits.Column(name="STAXOF", format="1E", array=staxof)
col7 = fits.Column(name="POLTYA", format="1A", array=poltya)
col8 = fits.Column(name="POLAA", format="1E", array=polaa)
# col9 = fits.Column(name='POLCALA', format='0E', array=Npcal, Nspws)
col10 = fits.Column(name="POLTYB", format="1A", array=poltyb)
col11 = fits.Column(name="POLAB", format="1E", array=polab)
# col12 = fits.Column(name='POLCALB', format='0E', array=Npcal, Nspws)
col_list = [col1, col2, col4, col5, col6, col7, col8, col10, col11]
# The commented out entries are up above to help check for consistency with the
# UVFITS format. ORBPARAM, POLCALA, and POLCALB are all technically required,
# but are all of zero length. Added here to help with debugging.
if self.antenna_diameters is not None:
col12 = fits.Column(
name="DIAMETER", format="1E", array=self.antenna_diameters
)
col_list.append(col12)
cols = fits.ColDefs(col_list)
ant_hdu = fits.BinTableHDU.from_columns(cols)
ant_hdu.header["EXTNAME"] = "AIPS AN"
ant_hdu.header["EXTVER"] = 1
# write XYZ coordinates
ant_hdu.header["ARRAYX"] = self.telescope_location[0]
ant_hdu.header["ARRAYY"] = self.telescope_location[1]
ant_hdu.header["ARRAYZ"] = self.telescope_location[2]
ant_hdu.header["FRAME"] = "ITRF"
ant_hdu.header["GSTIA0"] = self.gst0
# TODO Karto: Do this more intelligently in the future
if self.future_array_shapes:
ant_hdu.header["FREQ"] = self.freq_array[0]
else:
ant_hdu.header["FREQ"] = self.freq_array[0, 0]
ant_hdu.header["RDATE"] = self.rdate
ant_hdu.header["UT1UTC"] = self.dut1
ant_hdu.header["TIMESYS"] = self.timesys
if self.timesys != "UTC":
raise ValueError(
"This file has a time system {tsys}. "
'Only "UTC" time system files are supported'.format(tsys=self.timesys)
)
ant_hdu.header["ARRNAM"] = self.telescope_name
ant_hdu.header["NO_IF"] = self.Nspws
ant_hdu.header["DEGPDY"] = self.earth_omega
# This is just a statically defined value
ant_hdu.header["IATUTC"] = 37.0
# set mandatory parameters which are not supported by this object
# (or that we just don't understand)
ant_hdu.header["NUMORB"] = 0
# note: Bart had this set to 3. We've set it 0 after aips 117. -jph
ant_hdu.header["NOPCAL"] = 0
ant_hdu.header["POLTYPE"] = "X-Y LIN"
# note: we do not support the concept of "frequency setups"
# -- lists of spws given in a SU table.
# Karto: Here might be a place to address freq setup?
ant_hdu.header["FREQID"] = 1
# if there are offsets in images, this could be the culprit
ant_hdu.header["POLARX"] = 0.0
ant_hdu.header["POLARY"] = 0.0
ant_hdu.header["DATUTC"] = 0 # ONLY UTC SUPPORTED
# we always output right handed coordinates
ant_hdu.header["XYZHAND"] = "RIGHT"
# At some point, we can fill these in more completely using astropy IERS
# utilities, since CASA/AIPS doesn't want to be told what the apparent coords
# are, but rather wants to calculate them itself.
# ant_hdu.header["RDATE"] = '2020-07-24T16:35:39.144087'
# ant_hdu.header["POLARX"] = 0.0
# ant_hdu.header["POLARY"] = 0.0
fits_tables = [hdu, ant_hdu]
# If needed, add the FQ table
if self.Nspws > 1:
fmt_d = "%iD" % self.Nspws
fmt_e = "%iE" % self.Nspws
fmt_j = "%iJ" % self.Nspws
# TODO Karto: Temp implementation until we fix some other things in UVData
if_freq = start_freq_array - ref_freq
ch_width = delta_freq_array
tot_bw = (self.Nfreqs // self.Nspws) * np.abs(delta_freq_array)
sideband = np.sign(delta_freq_array) * np.ones((1, self.Nspws))
# FRQSEL is hardcoded at the moment, could think about doing this
# at least somewhat more intelligently...
col_list = [
fits.Column(name="FRQSEL", format="1J", array=[1]),
fits.Column(name="IF FREQ", unit="HZ", format=fmt_d, array=if_freq),
fits.Column(name="CH WIDTH", unit="HZ", format=fmt_e, array=ch_width),
fits.Column(
name="TOTAL BANDWIDTH", unit="HZ", format=fmt_e, array=tot_bw
),
fits.Column(name="SIDEBAND", format=fmt_j, array=sideband),
]
fq_hdu = fits.BinTableHDU.from_columns(fits.ColDefs(col_list))
fq_hdu.header["EXTNAME"] = "AIPS FQ"
fq_hdu.header["NO_IF"] = self.Nspws
fits_tables.append(fq_hdu)
# If needed, add the SU table
if self.multi_phase_center:
fmt_d = "%iD" % self.Nspws
fmt_e = "%iE" % self.Nspws
fmt_j = "%iJ" % self.Nspws
int_zeros = np.zeros(self.Nphase, dtype=int)
flt_zeros = np.zeros(self.Nphase, dtype=np.float64)
zero_arr = np.zeros((self.Nphase, self.Nspws))
sou_ids = np.zeros(self.Nphase)
name_arr = np.array(list(self.phase_center_catalog.keys()))
cal_code = [" "] * self.Nphase
# These are things we need to flip through on a source-by-source basis
ra_arr = np.zeros(self.Nphase, dtype=np.float64)
app_ra = np.zeros(self.Nphase, dtype=np.float64)
dec_arr = np.zeros(self.Nphase, dtype=np.float64)
app_dec = np.zeros(self.Nphase, dtype=np.float64)
epo_arr = np.zeros(self.Nphase, dtype=np.float64)
pm_ra = np.zeros(self.Nphase, dtype=np.float64)
pm_dec = np.zeros(self.Nphase, dtype=np.float64)
rest_freq = np.zeros((self.Nphase, self.Nspws), dtype=np.float64)
for idx, name in enumerate(name_arr):
phase_dict = self.phase_center_catalog[name]
# This is a stub for something smarter in the future
sou_ids[idx] = self.phase_center_catalog[name]["cat_id"] + id_offset
rest_freq[idx][:] = np.mean(self.freq_array)
# DNN (prediction)
import numpy as np
import tensorflow as tf
import datetime
import ctr_funcs as func
import config_deepmcp as cfg
import os
import shutil
# config
str_txt = cfg.output_file_name
base_path = './tmp'
model_saving_addr = base_path + '/dnn_' + str_txt + '/'
output_file_name = base_path + '/dnn_' + str_txt + '.txt'
num_csv_col = cfg.num_csv_col
train_file_name = cfg.train_file_name
val_file_name = cfg.val_file_name
test_file_name = cfg.test_file_name
batch_size = cfg.batch_size
n_ft = cfg.n_ft
k = cfg.k
kp_prob = cfg.kp_prob
n_epoch = cfg.n_epoch
max_num_lower_ct = cfg.max_num_lower_ct
record_step_size = cfg.record_step_size
layer_dim = cfg.layer_dim
layer_dim_match = cfg.layer_dim_match
eta = cfg.eta # learning rate
opt_alg = cfg.opt_alg
n_one_hot_slot = cfg.n_one_hot_slot
n_mul_hot_slot = cfg.n_mul_hot_slot
max_len_per_slot = cfg.max_len_per_slot
label_col_idx = 0
record_defaults = [[0]] * num_csv_col
record_defaults[0] = [0.0]
total_num_ft_col = num_csv_col - 1
# create dir
if not os.path.exists(base_path):
os.mkdir(base_path)
# remove dir
if os.path.isdir(model_saving_addr):
shutil.rmtree(model_saving_addr)
# for DNN
idx_1 = n_one_hot_slot
idx_2 = idx_1 + n_mul_hot_slot * max_len_per_slot
###########################################################
###########################################################
print('Loading data start!')
tf.set_random_seed(123)
# load training data
train_ft, train_label = func.tf_input_pipeline(train_file_name, batch_size, n_epoch, label_col_idx, record_defaults)
n_val_inst = func.count_lines(val_file_name[0])
val_ft, val_label = func.tf_input_pipeline(val_file_name, n_val_inst, 1, label_col_idx, record_defaults)
n_val_batch = n_val_inst // batch_size
# load test data
test_ft, test_label = func.tf_input_pipeline_test(test_file_name, batch_size, 1, label_col_idx, record_defaults)
print('Loading data set 1 done!')
########################################################################
# add mask
def get_masked_one_hot(x_input_one_hot):
data_mask = tf.cast(tf.greater(x_input_one_hot, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis=2)
data_mask = tf.tile(data_mask, (1, 1, k))
# output: (?, n_one_hot_slot, k)
data_embed_one_hot = tf.nn.embedding_lookup(emb_mat, x_input_one_hot)
data_embed_one_hot_masked = tf.multiply(data_embed_one_hot, data_mask)
return data_embed_one_hot_masked
def get_masked_mul_hot(x_input_mul_hot):
data_mask = tf.cast(tf.greater(x_input_mul_hot, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis=3)
data_mask = tf.tile(data_mask, (1, 1, 1, k))
# output: (?, n_mul_hot_slot, max_len_per_slot, k)
data_embed_mul_hot = tf.nn.embedding_lookup(emb_mat, x_input_mul_hot)
data_embed_mul_hot_masked = tf.multiply(data_embed_mul_hot, data_mask)
# output: (?, n_mul_hot_slot, k)
data_embed_mul_hot_masked = tf.reduce_sum(data_embed_mul_hot_masked, 2)
return data_embed_mul_hot_masked
# output: (?, n_one_hot_slot + n_mul_hot_slot, k)
def get_concate_embed(x_input_one_hot, x_input_mul_hot):
data_embed_one_hot = get_masked_one_hot(x_input_one_hot)
data_embed_mul_hot = get_masked_mul_hot(x_input_mul_hot)
data_embed_concat = tf.concat([data_embed_one_hot, data_embed_mul_hot], 1)
return data_embed_concat
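# Shape sketch (the slot counts and k below are hypothetical, not from config):
# with n_one_hot_slot = 8, n_mul_hot_slot = 4 and k = 10, get_concate_embed
# returns a tensor of shape (batch_size, 12, 10), which get_pred_output then
# flattens to (batch_size, 120) before the fully-connected layers.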
# input: (?, n_slot*k)
# output: (?, 1)
def get_pred_output(data_embed_concat):
# include output layer
n_layer = len(layer_dim)
data_embed_dnn = tf.reshape(data_embed_concat, [-1, (n_one_hot_slot + n_mul_hot_slot) * k])
cur_layer = data_embed_dnn
# loop to create DNN struct
for i in range(0, n_layer):
# output layer, linear activation
if i == n_layer - 1:
cur_layer = tf.matmul(cur_layer, weight_dict[i]) + bias_dict[i]
else:
cur_layer = tf.nn.relu(tf.matmul(cur_layer, weight_dict[i]) + bias_dict[i])
cur_layer = tf.nn.dropout(cur_layer, keep_prob)
y_hat = cur_layer
return y_hat
###########################################################
###########################################################
# input for prediction loss
x_input = tf.placeholder(tf.int32, shape=[None, total_num_ft_col])
# shape=[None, n_one_hot_slot]
x_input_one_hot = x_input[:, 0:idx_1]
x_input_mul_hot = x_input[:, idx_1:idx_2]
# shape=[None, n_mul_hot_slot, max_len_per_slot]
x_input_mul_hot = tf.reshape(x_input_mul_hot, (-1, n_mul_hot_slot, max_len_per_slot))
# target vec for l1
y_target = tf.placeholder(tf.float32, shape=[None, 1])
# dropout keep prob
keep_prob = tf.placeholder(tf.float32)
# embedding matrix has n_ft + 1 rows; row 0 is reserved for padding (idx = 0)
with tf.device('/cpu:0'):
emb_mat = tf.Variable(tf.random_normal([n_ft + 1, k], stddev=0.01))
################################
# prediction subnet FC layers, including output layer
n_layer = len(layer_dim)
in_dim = (n_one_hot_slot + n_mul_hot_slot) * k
weight_dict = {}
bias_dict = {}
# loop to create DNN vars
for i in range(0, n_layer):
out_dim = layer_dim[i]
weight_dict[i] = tf.Variable(tf.random_normal(shape=[in_dim, out_dim], stddev=np.sqrt(2.0 / (in_dim + out_dim))))
bias_dict[i] = tf.Variable(tf.constant(0.1, shape=[out_dim]))
in_dim = layer_dim[i]
################################
data_embed_concat = get_concate_embed(x_input_one_hot, x_input_mul_hot)
y_hat = get_pred_output(data_embed_concat)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_hat, labels=y_target))
#############################
# prediction
#############################
pred_score = tf.sigmoid(y_hat)
if opt_alg == 'Adam':
optimizer = tf.train.AdamOptimizer(eta).minimize(loss)
else:
# default
optimizer = tf.train.AdagradOptimizer(eta).minimize(loss)
########################################
# Launch the graph.
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
saver_val = tf.train.Saver()
train_loss_list = []
val_auc_list = []
best_n_round = 0
best_val_auc = 0
lower_ct = 0
early_stop_flag = 0
val_ft_inst, val_label_inst = sess.run([val_ft, val_label])
func.print_time()
print('Start train loop')
epoch = -1
try:
while not coord.should_stop():
epoch += 1
train_ft_inst, train_label_inst = sess.run([train_ft, train_label])
train_label_inst = np.transpose([train_label_inst])
# training
sess.run(optimizer, feed_dict={x_input: train_ft_inst, y_target: train_label_inst, \
keep_prob: kp_prob})
# record loss and accuracy every step_size generations
if (epoch + 1) % record_step_size == 0:
train_loss_temp = sess.run(loss, feed_dict={ \
x_input: train_ft_inst, y_target: train_label_inst, \
keep_prob: 1.0})
train_loss_list.append(train_loss_temp)
val_pred_score_all = []
val_label_all = []
for iii in range(n_val_batch):
# get batch
start_idx = iii * batch_size
end_idx = (iii + 1) * batch_size
cur_val_ft = val_ft_inst[start_idx: end_idx]
cur_val_label = val_label_inst[start_idx: end_idx]
# pred score
cur_val_pred_score = sess.run(pred_score, feed_dict={ \
x_input: cur_val_ft, keep_prob: 1.0})
val_pred_score_all.append(cur_val_pred_score.flatten())
val_label_all.append(cur_val_label)
# calculate auc
val_pred_score_re = func.list_flatten(val_pred_score_all)
val_label_re = func.list_flatten(val_label_all)
val_auc_temp, _, _ = func.cal_auc(val_pred_score_re, val_label_re)
# record all val results
val_auc_list.append(val_auc_temp)
# record best and save models
if val_auc_temp > best_val_auc:
best_val_auc = val_auc_temp
best_n_round = epoch
# Save the variables to disk
save_path = saver_val.save(sess, model_saving_addr)
print("Model saved in: %s" % save_path)
# count consecutive evaluations where validation AUC is below the best so far
if val_auc_temp < best_val_auc:
lower_ct += 1
# once higher or equal, set to 0
else:
lower_ct = 0
if lower_ct >= max_num_lower_ct:
early_stop_flag = 1
auc_and_loss = [epoch + 1, train_loss_temp, val_auc_temp]
# round to given number of decimals
auc_and_loss = [np.round(xx, 4) for xx in auc_and_loss]
func.print_time()
print('Generation # {}. Train Loss: {:.4f}. Val Avg AUC: {:.4f}.' \
.format(*auc_and_loss))
# stop while loop
if early_stop_flag == 1:
break
except tf.errors.OutOfRangeError:
func.print_time()
print('Done training -- epoch limit reached')
# restore model
saver_val.restore(sess, model_saving_addr)
print("Model restored.")
# load test data
test_pred_score_all = []
test_label_all = []
test_loss_all = []
try:
while True:
test_ft_inst, test_label_inst = sess.run([test_ft, test_label])
cur_test_pred_score = sess.run(pred_score, feed_dict={ \
x_input: test_ft_inst, keep_prob: 1.0})
test_pred_score_all.append(cur_test_pred_score.flatten())
test_label_all.append(test_label_inst)
cur_test_loss = sess.run(loss, feed_dict={ \
x_input: test_ft_inst, \
y_target: np.transpose([test_label_inst]), keep_prob: 1.0})
test_loss_all.append(cur_test_loss)
except tf.errors.OutOfRangeError:
func.print_time()
print('Done testing -- epoch limit reached')
finally:
coord.request_stop()
coord.join(threads)
# calculate auc
test_pred_score_re = func.list_flatten(test_pred_score_all)
test_label_re = func.list_flatten(test_label_all)
test_auc, _, _ = func.cal_auc(test_pred_score_re, test_label_re)
test_rmse = func.cal_rmse(test_pred_score_re, test_label_re)
test_loss = np.mean(test_loss_all)
# rounding
test_auc = np.round(test_auc, 4)
#!/usr/bin/env python
"""
@author: cdeline
bifacial_radiance.py - module to develop radiance bifacial scenes, including gendaylit and gencumulativesky
7/5/2016 - test script based on G173_journal_height
5/1/2017 - standalone module
Pre-requisites:
This software is written for Python >3.6 leveraging many Anaconda tools (e.g. pandas, numpy, etc)
*RADIANCE software should be installed from https://github.com/NREL/Radiance/releases
*If you want to use gencumulativesky, move 'gencumulativesky.exe' from
'bifacial_radiance\data' into your RADIANCE source directory.
*If using a Windows machine you should download the Jaloxa executables at
http://www.jaloxa.eu/resources/radiance/radwinexe.shtml#Download
* Installation of bifacial_radiance from the repo:
1. Clone the repo
2. Navigate to the directory using the command prompt
3. run `pip install -e . `
Overview:
Bifacial_radiance includes several helper functions to make it easier to evaluate
different PV system orientations for rear bifacial irradiance.
Note that this is simply an optical model - identifying available rear irradiance under different conditions.
For a detailed demonstration example, look at the .ipynb notebook in \docs\
There are two solar resource modes in bifacial_radiance: `gendaylit` uses hour-by-hour solar
resource descriptions using the Perez diffuse tilted plane model.
`gencumulativesky` is an annual average solar resource that combines hourly
Perez skies into one single solar source, and computes an annual average.
bifacial_radiance includes five object-oriented classes:
RadianceObj: top level class to work on radiance objects, keep track of filenames,
sky values, PV module type etc.
GroundObj: details for the ground surface and reflectance
SceneObj: scene information including array configuration (row spacing, clearance or hub height)
MetObj: meteorological data from EPW (energyplus) file.
Future work: include other file support including TMY files
AnalysisObj: Analysis class for plotting and reporting
"""
import logging
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
import os, datetime
from subprocess import Popen, PIPE # replacement for os.system()
import pandas as pd
import numpy as np
import warnings
#from input import *
# Mutual parameters across all processes
#daydate=sys.argv[1]
global DATA_PATH # path to data files including module.json. Global context
#DATA_PATH = os.path.abspath(pkg_resources.resource_filename('bifacial_radiance', 'data/') )
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def _findme(lst, a): #find string match in a list. script from stackexchange
return [i for i, x in enumerate(lst) if x == a]
def _missingKeyWarning(dictype, missingkey, newvalue): # prints warnings
if type(newvalue) is bool:
valueunit = ''
else:
valueunit = 'm'
print("Warning: {} Dictionary Parameters passed, but {} is missing. ".format(dictype, missingkey))
print("Setting it to default value of {} {} to continue\n".format(newvalue, valueunit))
def _normRGB(r, g, b): #normalize by each color for human vision sensitivity
return r*0.216+g*0.7152+b*0.0722
def _popen(cmd, data_in, data_out=PIPE):
"""
Helper function subprocess.popen replaces os.system
- gives better input/output process control
usage: pass <data_in> to process <cmd> and return results
based on rgbeimage.py (<NAME> 2010)
"""
if type(cmd) == str:
cmd = str(cmd) # gets rid of unicode oddities
shell=True
else:
shell=False
p = Popen(cmd, bufsize=-1, stdin=PIPE, stdout=data_out, stderr=PIPE, shell=shell) #shell=True required for Linux? quick fix, but may be security concern
data, err = p.communicate(data_in)
#if err:
# return 'message: '+err.strip()
#if data:
# return data. in Python3 this is returned as `bytes` and needs to be decoded
if err:
if data:
returntuple = (data.decode('latin1'), 'message: '+err.decode('latin1').strip())
else:
returntuple = (None, 'message: '+err.decode('latin1').strip())
else:
if data:
returntuple = (data.decode('latin1'), None) #Py3 requires decoding
else:
returntuple = (None, None)
return returntuple
def _interactive_load(title=None):
# Tkinter file picker
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring window into foreground
return filedialog.askopenfilename(parent=root, title=title) #initialdir = data_dir
def _interactive_directory(title=None):
# Tkinter directory picker. Now Py3.6 compliant!
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring to front
return filedialog.askdirectory(parent=root, title=title)
def _modDict(originaldict, moddict, relative=False):
'''
Compares keys in originaldict with moddict and updates values of
originaldict to moddict if existing.
Parameters
----------
originaldict : dictionary
Original dictionary calculated, for example frontscan or backscan dictionaries.
moddict : dictionary
Modified dictionary, for example modscan['xstart'] = 0 to change the x position.
relative : Bool
if passing modscanfront and modscanback to modify dictionaries of positions,
this sets if the values passed to be updated are relative or absolute.
Default is absolute value (relative=False)
Returns
-------
originaldict : dictionary
Updated original dictionary with values from moddict.
'''
newdict = originaldict.copy()
for key in moddict:
try:
if relative:
newdict[key] = moddict[key] + newdict[key]
else:
newdict[key] = moddict[key]
except:
print("Wrong key in modified dictionary")
return newdict
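# Example (values are illustrative): _modDict({'xstart': 1.0}, {'xstart': 0.5})
# returns {'xstart': 0.5}; with relative=True it returns {'xstart': 1.5}.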
def _heightCasesSwitcher(sceneDict, preferred='hub_height', nonpreferred='clearance_height'):
"""
Parameters
----------
sceneDict : dictionary
Dictionary that might contain more than one way of defining height for
the array: `clearance_height`, `hub_height`, `height`*
* height deprecated from sceneDict. This function helps choose
* which definition to use.
preferred : str, optional
When sceneDict has hub_height and clearance_height, or it only has height,
it will keep only the preferred option. The default is 'hub_height'.
nonpreferred : str, optional
When sceneDict has hub_height and clearance_height,
it will delete this nonpreferred option. The default is 'clearance_height'.
Returns
-------
sceneDict : dictionary
Dictionary now containing the appropriate definition for system height.
use_clearanceheight : Bool
Helper variable to specify if the dictionary has only clearance_height, for
use inside `makeScene1axis`. Will get deprecated once that internal
function is streamlined.
"""
# TODO: When we update to python 3.9.0, this could be a Switch Cases (Structural Pattern Matching):
heightCases = '_'
if 'height' in sceneDict:
heightCases = heightCases+'height__'
if 'clearance_height' in sceneDict:
heightCases = heightCases+'clearance_height__'
if 'hub_height' in sceneDict:
heightCases = heightCases+'hub_height__'
use_clearanceheight = False
# CASES:
if heightCases == '_height__':
print("sceneDict Warning: 'height' is being deprecated. "+
"Renaming as "+preferred)
sceneDict[preferred]=sceneDict['height']
del sceneDict['height']
elif heightCases == '_clearance_height__':
#print("Using clearance_height.")
use_clearanceheight = True
elif heightCases == '_hub_height__':
#print("Using hub_height.'")
pass
elif heightCases == '_height__clearance_height__':
print("sceneDict Warning: 'clearance_height and 'height' "+
"(deprecated) are being passed. removing 'height' "+
"from sceneDict for this tracking routine")
del sceneDict['height']
use_clearanceheight = True
elif heightCases == '_height__hub_height__':
print("sceneDict Warning: 'height' is being deprecated. Using 'hub_height'")
del sceneDict['height']
elif heightCases == '_height__clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height', 'clearance_height'"+
", and 'height' are being passed. Removing 'height'"+
" (deprecated) and "+ nonpreferred+ ", using "+preferred)
del sceneDict[nonpreferred]
elif heightCases == '_clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height' and 'clearance_height'"+
" are being passed. Using "+preferred+
" and removing "+ nonpreferred)
del sceneDict[nonpreferred]
else:
print ("sceneDict Error! no argument in sceneDict found "+
"for 'hub_height', 'height' nor 'clearance_height'. "+
"Exiting routine.")
return sceneDict, use_clearanceheight
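# Example (values are illustrative): passing {'hub_height': 2.0, 'clearance_height': 0.5}
# with the default preferred='hub_height' returns ({'hub_height': 2.0}, False),
# after warning that the nonpreferred 'clearance_height' entry is removed.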
def _is_leap_and_29Feb(s): # Removes Feb. 29 if it is a leap year.
return (s.index.year % 4 == 0) & \
((s.index.year % 100 != 0) | (s.index.year % 400 == 0)) & \
(s.index.month == 2) & (s.index.day == 29)
def _subhourlydatatoGencumskyformat(gencumskydata, label='right'):
# Subroutine to resample, pad, remove leap year and get data in the
# 8760 hourly format
# for saving the temporary files for gencumsky in _saveTempTMY and
# _makeTrackerCSV
#Resample to hourly. Gencumsky wants right-labeled data.
gencumskydata = gencumskydata.resample('60T', closed='right', label='right').mean()
if label == 'left': #switch from left to right labeled by adding an hour
gencumskydata.index = gencumskydata.index + pd.to_timedelta('1H')
# Padding
tzinfo = gencumskydata.index.tzinfo
padstart = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0],1,1,1,0 ) ).tz_localize(tzinfo)
padend = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0]+1,1,1,0,0) ).tz_localize(tzinfo)
gencumskydata.iloc[0] = 0 # set first datapt to zero to forward fill w zeros
gencumskydata.iloc[-1] = 0 # set last datapt to zero to forward fill w zeros
# check if index exists. I'm sure there is a way to do this backwards.
if any(gencumskydata.index.isin([padstart])):
print("Data starts on Jan. 01")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padstart]))
gencumskydata=pd.concat([gencumskydata,pd.DataFrame(index=[padstart])])
if any(gencumskydata.index.isin([padend])):
print("Data ends on Dec. 31st")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padend]))
gencumskydata=pd.concat([gencumskydata, pd.DataFrame(index=[padend])])
gencumskydata.loc[padstart]=0
gencumskydata.loc[padend]=0
gencumskydata=gencumskydata.sort_index()
# Fill empty timestamps with zeros
gencumskydata = gencumskydata.resample('60T').asfreq().fillna(0)
# Mask leap year
leapmask = ~(_is_leap_and_29Feb(gencumskydata))
gencumskydata = gencumskydata[leapmask]
if (gencumskydata.index.year[-1] == gencumskydata.index.year[-2]+1) and len(gencumskydata)>8760:
gencumskydata = gencumskydata[:-1]
return gencumskydata
# end _subhourlydatatoGencumskyformat
class RadianceObj:
"""
The RadianceObj top level class is used to work on radiance objects,
keep track of filenames, sky values, PV module configuration, etc.
Parameters
----------
name : text to append to output files
filelist : list of Radiance files to create oconv
nowstr : current date/time string
path : working directory with Radiance materials and objects
Methods
-------
__init__ : initialize the object
_setPath : change the working directory
"""
def __repr__(self):
return str(self.__dict__)
def __init__(self, name=None, path=None, hpc=False):
'''
initialize RadianceObj with path of Radiance materials and objects,
as well as a basename to append to
Parameters
----------
name: string, append temporary and output files with this value
path: location of Radiance materials and objects
hpc: Keeps track if User is running simulation on HPC so some file
reading routines try reading a bit longer and some writing
routines (makeModule) that overwrite themselves are inactivated.
Returns
-------
none
'''
self.metdata = {} # data from epw met file
self.data = {} # data stored at each timestep
self.path = "" # path of working directory
self.name = "" # basename to append
#self.filelist = [] # list of files to include in the oconv
self.materialfiles = [] # material files for oconv
self.skyfiles = [] # skyfiles for oconv
self.radfiles = [] # scene rad files for oconv
self.octfile = [] #octfile name for analysis
self.Wm2Front = 0 # cumulative tabulation of front W/m2
self.Wm2Back = 0 # cumulative tabulation of rear W/m2
self.backRatio = 0 # ratio of rear / front Wm2
self.nMods = None # number of modules per row
self.nRows = None # number of rows per scene
self.hpc = hpc # HPC simulation is being run. Some read/write functions are modified
now = datetime.datetime.now()
self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)
# DEFAULTS
if name is None:
self.name = self.nowstr # set default filename for output files
else:
self.name = name
self.basename = name # add backwards compatibility for prior versions
#self.__name__ = self.name #optional info
#self.__str__ = self.__name__ #optional info
if path is None:
self._setPath(os.getcwd())
else:
self._setPath(path)
# load files in the /materials/ directory
self.materialfiles = self.returnMaterialFiles('materials')
def _setPath(self, path):
"""
setPath - move path and working directory
"""
self.path = os.path.abspath(path)
print('path = '+ path)
try:
os.chdir(self.path)
except OSError as exc:
LOGGER.error("Path doesn't exist: %s" % (path))
LOGGER.exception(exc)
raise(exc)
# check for path in the new Radiance directory:
def _checkPath(path): # create the file structure if it doesn't exist
if not os.path.exists(path):
os.makedirs(path)
print('Making path: '+path)
_checkPath('images'); _checkPath('objects')
_checkPath('results'); _checkPath('skies'); _checkPath('EPWs')
# if materials directory doesn't exist, populate it with ground.rad
# figure out where pip installed support files.
from shutil import copy2
if not os.path.exists('materials'): #copy ground.rad to /materials
os.makedirs('materials')
print('Making path: materials')
copy2(os.path.join(DATA_PATH, 'ground.rad'), 'materials')
# if views directory doesn't exist, create it with two default views - side.vp and front.vp
if not os.path.exists('views'):
os.makedirs('views')
with open(os.path.join('views', 'side.vp'), 'w') as f:
f.write('rvu -vtv -vp -10 1.5 3 -vd 1.581 0 -0.519234 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
with open(os.path.join('views', 'front.vp'), 'w') as f:
f.write('rvu -vtv -vp 0 -3 5 -vd 0 0.894427 -0.894427 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
def getfilelist(self):
"""
Return concat of matfiles, radfiles and skyfiles
"""
return self.materialfiles + self.skyfiles + self.radfiles
def save(self, savefile=None):
"""
Pickle the radiance object for further use.
Very basic operation - not much use right now.
Parameters
----------
savefile : str
Optional savefile name, with .pickle extension.
Otherwise default to save.pickle
"""
import pickle
if savefile is None:
savefile = 'save.pickle'
with open(savefile, 'wb') as f:
pickle.dump(self, f)
print('Saved to file {}'.format(savefile))
#def setHPC(self, hpc=True):
# self.hpc = hpc
def addMaterial(self, material, Rrefl, Grefl, Brefl, materialtype='plastic',
specularity=0, roughness=0, material_file=None, comment=None, rewrite=True):
"""
Function to add a material in Radiance format.
Parameters
----------
material : str
Name of the material to add or update.
Rrefl : str
Reflectivity for first wavelength, or 'R' bin.
Grefl : str
Reflecstrtivity for second wavelength, or 'G' bin.
Brefl : str
Reflectivity for third wavelength, or 'B' bin.
materialtype : str, optional
Type of material. The default is 'plastic'. Others can be mirror,
trans, etc. See RADIANCE documentation.
specularity : str, optional
Ratio of reflection that is specular and not diffuse. The default is 0.
roughness : str, optional
This is the microscopic surface roughness: the more jagged the
facets are, the rougher it is and more blurry reflections will appear.
material_file : str, optional
Filename of the material file to write to. The default is None, which uses 'ground.rad'.
comment : str, optional
Optional comment line written above the material definition. The default is None.
rewrite : bool, optional
If the material already exists in the file, overwrite its definition. The default is True.
Returns
-------
None. Just adds the material to the material_file specified or the
default in ``materials\ground.rad``.
References:
See examples of documentation for more materialtype details.
http://www.jaloxa.eu/resources/radiance/documentation/docs/radiance_tutorial.pdf page 10
Also, you can use https://www.jaloxa.eu/resources/radiance/colour_picker.shtml
to have a sense of how the material would look with the RGB values as
well as specularity and roughness.
To understand more on reflectivity, specularity and roughness values
https://thinkmoult.com/radiance-specularity-and-roughness-value-examples.html
"""
if material_file is None:
material_file = 'ground.rad'
matfile = os.path.join('materials', material_file)
with open(matfile, 'r') as fp:
buffer = fp.readlines()
# search buffer for material matching requested addition
found = False
for i in buffer:
if materialtype and material in i:
loc = buffer.index(i)
found = True
break
if found:
if rewrite:
print('Material exists, overwriting...\n')
if comment is None:
pre = loc - 1
else:
pre = loc - 2
# commit buffer without material match
with open(matfile, 'w') as fp:
for i in buffer[0:pre]:
fp.write(i)
for i in buffer[loc+4:]:
fp.write(i)
if (found and rewrite) or (not found):
# append -- This will create the file if it doesn't exist
file_object = open(matfile, 'a')
file_object.write("\n\n")
if comment is not None:
file_object.write("#{}".format(comment))
file_object.write("\nvoid {} {}".format(materialtype, material))
if materialtype == 'glass':
file_object.write("\n0\n0\n3 {} {} {}".format(Rrefl, Grefl, Brefl))
else:
file_object.write("\n0\n0\n5 {} {} {} {} {}".format(Rrefl, Grefl, Brefl, specularity, roughness))
file_object.close()
print('Added material {} to file {}'.format(material, material_file))
if (found and not rewrite):
print('Material already exists\n')
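# Usage sketch (material name and RGB values are illustrative):
#   demo.addMaterial('grey_paver', Rrefl=0.3, Grefl=0.3, Brefl=0.3,
#                    specularity=0, roughness=0.1)
# appends a 'plastic' definition to materials/ground.rad by default.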
def exportTrackerDict(self, trackerdict=None,
savefile=None, reindex=None):
"""
Use :py:func:`~bifacial_radiance.load._exportTrackerDict` to save a
TrackerDict output as a csv file.
Parameters
----------
trackerdict
The tracker dictionary to save
savefile : str
path to .csv save file location
reindex : bool
True saves the trackerdict in TMY format, including rows for hours
where there is no sun/irradiance results (empty)
"""
import bifacial_radiance.load
if trackerdict is None:
trackerdict = self.trackerdict
if savefile is None:
savefile = _interactive_load(title='Select a .csv file to save to')
if reindex is None:
if self.cumulativesky is True:
# don't re-index for cumulativesky,
# which has angles for index
reindex = False
else:
reindex = True
if self.cumulativesky is True and reindex is True:
# don't re-index for cumulativesky,
# which has angles for index
print ("\n Warning: For cumulativesky simulations, exporting the "
"TrackerDict requires reindex = False. Setting reindex = "
"False and proceeding")
reindex = False
bifacial_radiance.load._exportTrackerDict(trackerdict,
savefile,
reindex)
def loadtrackerdict(self, trackerdict=None, fileprefix=None):
"""
Use :py:class:`bifacial_radiance.load._loadtrackerdict`
to browse the results directory and load back any results saved in there.
Parameters
----------
trackerdict
fileprefix : str
"""
from bifacial_radiance.load import loadTrackerDict
if trackerdict is None:
trackerdict = self.trackerdict
(trackerdict, totaldict) = loadTrackerDict(trackerdict, fileprefix)
self.Wm2Front = totaldict['Wm2Front']
self.Wm2Back = totaldict['Wm2Back']
def returnOctFiles(self):
"""
Return files in the root directory with `.oct` extension
Returns
-------
oct_files : list
List of .oct files
"""
oct_files = [f for f in os.listdir(self.path) if f.endswith('.oct')]
#self.oct_files = oct_files
return oct_files
def returnMaterialFiles(self, material_path=None):
"""
Return files in the Materials directory with .rad extension
appends materials files to the oconv file list
Parameters
----------
material_path : str
Optional parameter to point to a specific materials directory.
otherwise /materials/ is default
Returns
-------
material_files : list
List of .rad files
"""
if material_path is None:
material_path = 'materials'
material_files = [f for f in os.listdir(os.path.join(self.path,
material_path)) if f.endswith('.rad')]
materialfilelist = [os.path.join(material_path, f) for f in material_files]
self.materialfiles = materialfilelist
return materialfilelist
def setGround(self, material=None, material_file=None):
"""
Use GroundObj constructor class and return a ground object
Parameters
------------
material : numeric or str
If number between 0 and 1 is passed, albedo input is assumed and assigned.
If string is passed with the name of the material desired. e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
self.ground : tuple
self.ground.normval : numeric
Normalized color value
self.ground.ReflAvg : numeric
Average reflectance
"""
if material is None:
try:
if self.metdata.albedo is not None:
material = self.metdata.albedo
print(" Assigned Albedo from metdata.albedo")
except:
pass
self.ground = GroundObj(material, material_file)
def getEPW(self, lat=None, lon=None, GetAll=False):
"""
Subroutine to download nearest epw files to latitude and longitude provided,
into the directory \EPWs\
based on github/aahoo.
.. warning::
verify=false is required to operate within NREL's network.
to avoid annoying warnings, insecurerequestwarning is disabled
currently this function is not working within NREL's network. annoying!
Parameters
----------
lat : decimal
Used to find closest EPW file.
lon : decimal
Longitude value to find closest EPW file.
GetAll : boolean
Download all available files. Note that no epw file will be loaded into memory
"""
import requests, re
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
hdr = {'User-Agent' : "Magic Browser",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
path_to_save = 'EPWs' # create a directory and write the name of directory here
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
def _returnEPWnames():
''' return a dataframe with the name, lat, lon, url of available files'''
r = requests.get('https://github.com/NREL/EnergyPlus/raw/develop/weather/master.geojson', verify=False)
data = r.json() #metadata for available files
#download lat/lon and url details for each .epw file into a dataframe
df = pd.DataFrame({'url':[], 'lat':[], 'lon':[], 'name':[]})
for location in data['features']:
match = re.search(r'href=[\'"]?([^\'" >]+)', location['properties']['epw'])
if match:
url = match.group(1)
name = url[url.rfind('/') + 1:]
lontemp = location['geometry']['coordinates'][0]
lattemp = location['geometry']['coordinates'][1]
dftemp = pd.DataFrame({'url':[url], 'lat':[lattemp], 'lon':[lontemp], 'name':[name]})
#df = df.append(dftemp, ignore_index=True)
df = pd.concat([df, dftemp], ignore_index=True)
return df
def _findClosestEPW(lat, lon, df):
#locate the record with the nearest lat/lon
errorvec = np.sqrt(np.square(df.lat - lat) + np.square(df.lon - lon))
index = errorvec.idxmin()
url = df['url'][index]
name = df['name'][index]
return url, name
def _downloadEPWfile(url, path_to_save, name):
r = requests.get(url, verify=False, headers=hdr)
if r.ok:
filename = os.path.join(path_to_save, name)
# py2 and 3 compatible: binary write, encode text first
with open(filename, 'wb') as f:
f.write(r.text.encode('ascii', 'ignore'))
print(' ... OK!')
else:
print(' connection error status code: %s' %(r.status_code))
r.raise_for_status()
# Get the list of EPW filenames and lat/lon
df = _returnEPWnames()
# find the closest EPW file to the given lat/lon
if (lat is not None) & (lon is not None) & (GetAll is False):
url, name = _findClosestEPW(lat, lon, df)
# download the EPW file to the local drive.
print('Getting weather file: ' + name)
_downloadEPWfile(url, path_to_save, name)
self.epwfile = os.path.join('EPWs', name)
elif GetAll is True:
if input('Downloading ALL EPW files available. OK? [y/n]') == 'y':
# get all of the EPW files
for index, row in df.iterrows():
print('Getting weather file: ' + row['name'])
_downloadEPWfile(row['url'], path_to_save, row['name'])
self.epwfile = None
else:
print('Nothing returned. Proper usage: epwfile = getEPW(lat,lon)')
self.epwfile = None
return self.epwfile
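# Usage sketch (coordinates are illustrative):
#   epwfile = demo.getEPW(lat=39.7, lon=-105.2)
# downloads the nearest EPW file into the EPWs/ folder and returns its path.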
def readWeatherFile(self, weatherFile=None, starttime=None,
endtime=None, label=None, source=None,
coerce_year=None, tz_convert_val=None):
"""
Read either a EPW or a TMY file, calls the functions
:py:class:`~bifacial_radiance.readTMY` or
:py:class:`~bifacial_radiance.readEPW`
according to the weatherfile extention.
Parameters
----------
weatherFile : str
File containing the weather information. EPW, TMY or solargis accepted.
starttime : str
Limited start time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
endtime : str
Limited end time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
daydate : str DEPRECATED
For single day in 'MM/DD' or MM_DD format. Now use starttime and
endtime set to the same date.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
source : str
To help identify different types of .csv files. If None, it assumes
it is a TMY3-style formated data. Current options: 'TMY3',
'solargis', 'EPW'
coerce_year : int
Year to coerce weather data to in YYYY format, ie 2021.
If more than one year of data in the weather file, year is NOT coerced.
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
"""
#from datetime import datetime
import warnings
if weatherFile is None:
if hasattr(self,'epwfile'):
weatherFile = self.epwfile
else:
try:
weatherFile = _interactive_load('Select EPW or TMY3 climate file')
except:
raise Exception('Interactive load failed. Tkinter not supported'+
'on this system. Try installing X-Quartz and reloading')
if coerce_year is not None:
coerce_year = int(coerce_year)
if str(coerce_year).__len__() != 4:
warnings.warn('Incorrect coerce_year. Setting to None')
coerce_year = None
def _parseTimes(t, hour, coerce_year):
'''
parse time input t which could be string mm_dd_HH or YYYY-mm-dd_HHMM
or datetime.datetime object. Return pd.datetime object. Define
hour as hour input if not passed directly.
'''
import re
if type(t) == str:
try:
tsplit = re.split('-|_| ', t)
#mm_dd format
if tsplit.__len__() == 2 and t.__len__() == 5:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
tsplit.append(str(hour).rjust(2,'0')+'00')
#mm_dd_hh or YYYY_mm_dd format
elif tsplit.__len__() == 3 :
if tsplit[0].__len__() == 2:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
elif tsplit[0].__len__() == 4:
tsplit.append(str(hour).rjust(2,'0')+'00')
#YYYY-mm-dd_HHMM format
if tsplit.__len__() == 4 and tsplit[0].__len__() == 4:
t_out = pd.to_datetime(''.join(tsplit).ljust(12,'0') )
else:
raise Exception(f'incorrect time string passed {t}.'
'Valid options: mm_dd, mm_dd_HH, '
'mm_dd_HHMM, YYYY-mm-dd_HHMM')
except Exception as e:
# Error for incorrect string passed:
raise(e)
else: #datetime or timestamp
try:
t_out = pd.to_datetime(t)
except pd.errors.ParserError:
print('incorrect time object passed. Valid options: '
'string or datetime.datetime or pd.timeIndex. You '
f'passed {type(t)}.')
return t_out, coerce_year
# end _parseTimes
def _tz_convert(metdata, metadata, tz_convert_val):
"""
convert metdata to a different local timezone. Particularly for
SolarGIS weather files which are returned in UTC by default.
----------
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
Returns: metdata, metadata
"""
import pytz
if (type(tz_convert_val) == int) | (type(tz_convert_val) == float):
metadata['TZ'] = tz_convert_val
metdata = metdata.tz_convert(pytz.FixedOffset(tz_convert_val*60))
return metdata, metadata
# end _tz_convert
if source is None:
if weatherFile[-3:].lower() == 'epw':
source = 'EPW'
else:
print('Warning: CSV file passed for input. Assuming it is TMY3 '+
'style format')
source = 'TMY3'
if label is None:
label = 'right' # EPW and TMY are by default right-labeled.
if source.lower() == 'solargis':
if label is None:
label = 'center'
metdata, metadata = self._readSOLARGIS(weatherFile, label=label)
if source.lower() =='epw':
metdata, metadata = self._readEPW(weatherFile, label=label)
if source.lower() =='tmy3':
metdata, metadata = self._readTMY(weatherFile, label=label)
metdata, metadata = _tz_convert(metdata, metadata, tz_convert_val)
tzinfo = metdata.index.tzinfo
tempMetDatatitle = 'metdata_temp.csv'
# Parse the start and endtime strings.
if starttime is not None:
starttime, coerce_year = _parseTimes(starttime, 1, coerce_year)
starttime = starttime.tz_localize(tzinfo)
if endtime is not None:
endtime, coerce_year = _parseTimes(endtime, 23, coerce_year)
endtime = endtime.tz_localize(tzinfo)
'''
#TODO: do we really need this check?
if coerce_year is not None and starttime is not None:
if coerce_year != starttime.year or coerce_year != endtime.year:
print("Warning: Coerce year does not match requested sampled "+
"date(s)'s years. Setting Coerce year to None.")
coerce_year = None
'''
tmydata_trunc = self._saveTempTMY(metdata, filename=tempMetDatatitle,
starttime=starttime, endtime=endtime,
coerce_year=coerce_year,
label=label)
if tmydata_trunc.__len__() > 0:
self.metdata = MetObj(tmydata_trunc, metadata, label = label)
else:
self.metdata = None
raise Exception('Weather file returned zero points for the '
'starttime / endtime provided')
return self.metdata
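# Usage sketch (file name and time window are illustrative):
#   metdata = demo.readWeatherFile('EPWs/myfile.epw',
#                                  starttime='2021-06-21_0600',
#                                  endtime='2021-06-21_2000',
#                                  coerce_year=2021)
# restricts the loaded weather data to the requested window.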
def _saveTempTMY(self, tmydata, filename=None, starttime=None, endtime=None,
coerce_year=None, label=None):
'''
private function to save part or all of tmydata into /EPWs/ for use
in gencumsky -G mode and return truncated tmydata. Gencumsky 8760
starts with Jan 1, 1AM and ends Dec 31, 2400
starttime: tz-localized pd.TimeIndex
endtime: tz-localized pd.TimeIndex
returns: tmydata_truncated : subset of tmydata based on start & end
'''
if filename is None:
filename = 'temp.csv'
gencumskydata = None
gencumdict = None
if len(tmydata) == 8760:
print("8760 line in WeatherFile. Assuming this is a standard hourly"+
" WeatherFile for the year for purposes of saving Gencumulativesky"+
" temporary weather files in EPW folder.")
if coerce_year is None and starttime is not None:
coerce_year = starttime.year
# SILVANA: If user doesn't pass starttime, and doesn't select
# coerce_year, then do we really need to coerce it?
elif coerce_year is None:
coerce_year = 2021
print(f"Coercing year to {coerce_year}")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# Correcting last index to next year.
tmydata.index.values[-1] = tmydata.index[-1] + pd.DateOffset(year=(coerce_year+1))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
starttime
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
else:
if len(tmydata.index.year.unique()) == 1:
if coerce_year:
# TODO: check why subhourly data still has 0 entries on the next day on _readTMY3
# in the meantime, let's make Silvana's life easy by just deleting 0 entries
tmydata = tmydata[~(tmydata.index.hour == 0)]
print(f"Coercing year to {coerce_year}")
# TODO: this coercing shows a python warning. Turn it off or find another method? bleh.
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
if coerce_year:
print("More than 1 year of data identified. Can't do coercing")
# Check if years are consecutive
l = list(tmydata.index.year.unique())
if l != list(range(min(l), max(l)+1)):
print("Years are not consecutive. Won't be able to use Gencumsky"+
" because who knows what's going on with this data.")
else:
print("Years are consecutive. For Gencumsky, make sure to select"+
" which yearly temporary weather file you want to use"+
" else they will all get accumulated to same hour/day")
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata = tmydata[filterdates] # Reducing years potentially
# Checking if filtering reduced to just 1 year to do the usual saving.
if len(tmydata.index.year.unique()) == 1:
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
gencumdict = [g for n, g in tmydata.groupby(pd.Grouper(freq='Y'))]
for ii in range(0, len(gencumdict)):
gencumskydata = gencumdict[ii]
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
gencumdict[ii] = gencumskydata
gencumskydata = None # clearing so that the dictionary style can be activated.
# Let's save files in EPWs folder for Gencumsky
if gencumskydata is not None:
csvfile = os.path.join('EPWs', filename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile = csvfile
if gencumdict is not None:
self.gencumsky_metfile = []
for ii in range (0, len(gencumdict)):
gencumskydata = gencumdict[ii]
newfilename = filename.split('.')[0]+'_year_'+str(ii)+'.csv'
csvfile = os.path.join('EPWs', newfilename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile.append(csvfile)
return tmydata
def _readTMY(self, tmyfile=None, label = 'right', coerce_year=None):
'''
use pvlib to read in a tmy3 file.
Note: pvlib 0.7 does not currently support sub-hourly files. Until
then, use _readTMYdate() to create the index
Parameters
------------
tmyfile : str
Filename of tmy3 to be read with pvlib.tmy.readtmy3
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce to. Default is 2021.
Returns
-------
metdata - MetObj collected from TMY3 file
'''
def _convertTMYdate(data, meta):
''' requires pvlib 0.8, updated to handle subhourly timestamps '''
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data['Date (MM/DD/YYYY)'])
# shift the time column so that midnite is 00:00 instead of 24:00
shifted_hour = data['Time (HH:MM)'].str[:2].astype(int) % 24
minute = data['Time (HH:MM)'].str[3:].astype(int)
# shift the dates at midnite so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# use indices to check for a leap day and advance it to March 1st
leapday = (data_index.month == 2) & (data_index.day == 29)
data_ymd[leapday] += datetime.timedelta(days=1)
# shifted_hour is a pd.Series, so use pd.to_timedelta to get a pd.Series of
# timedeltas
# NOTE: as of pvlib-0.6.3, min req is pandas-0.18.1, so pd.to_timedelta
# unit must be in (D,h,m,s,ms,us,ns), but pandas>=0.24 allows unit='hour'
data.index = (data_ymd + pd.to_timedelta(shifted_hour, unit='h') +
pd.to_timedelta(minute, unit='min') )
data = data.tz_localize(int(meta['TZ'] * 3600))
return data
import pvlib
#(tmydata, metadata) = pvlib.tmy.readtmy3(filename=tmyfile) #pvlib<=0.6
(tmydata, metadata) = pvlib.iotools.tmy.read_tmy3(filename=tmyfile,
coerce_year=coerce_year)
try:
tmydata = _convertTMYdate(tmydata, metadata)
except KeyError:
print('PVLib >= 0.8.0 is required for sub-hourly data input')
return tmydata, metadata
def _readEPW(self, epwfile=None, label = 'right', coerce_year=None):
"""
Uses readepw from pvlib>0.6.1 but un-do -1hr offset and
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Parameters
------------
epwfile : str
Directory and filename of the epwfile. If None, opens an interactive
loading window.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce data to.
"""
import pvlib
#import re
'''
NOTE: In PVLib > 0.6.1 the new epw.read_epw() function reads in time
with a default -1 hour offset. This is reflected in our existing
workflow.
'''
#(tmydata, metadata) = readepw(epwfile) #
(tmydata, metadata) = pvlib.iotools.epw.read_epw(epwfile,
coerce_year=coerce_year) #pvlib>0.6.1
#pvlib uses -1hr offset that needs to be un-done. Why did they do this?
tmydata.index = tmydata.index+pd.Timedelta(hours=1)
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
tmydata.rename(columns={'dni':'DNI',
'dhi':'DHI',
'temp_air':'DryBulb',
'wind_speed':'Wspd',
'ghi':'GHI',
'albedo':'Alb'
}, inplace=True)
return tmydata, metadata
def _readSOLARGIS(self, filename=None, label='center'):
"""
Read solarGIS data file which is timestamped in UTC.
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Timezone is always returned as UTC. Use tz_convert in readWeatherFile
to manually convert to local time
Parameters
------------
filename : str
filename of the solarGIS file.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval. SolarGis default style is center,
unless user requests a right label.
"""
# file format: anything with # preceding is in the header
header = []; lat = None; lon = None; elev = None; name = None
with open(filename, 'r') as result:
for line in result:
if line.startswith('#'):
header.append(line)
if line.startswith('#Latitude:'):
lat = line[11:]
if line.startswith('#Longitude:'):
lon = line[12:]
if line.startswith('#Elevation:'):
elev = line[12:17]
if line.startswith('#Site name:'):
name = line[12:-1]
else:
break
metadata = {'latitude':float(lat),
'longitude':float(lon),
'altitude':float(elev),
'Name':name,
'TZ':0.0}
# read in remainder of data
data = pd.read_csv(filename,skiprows=header.__len__(), delimiter=';')
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
data.rename(columns={'DIF':'DHI',
'TEMP':'DryBulb',
'WS':'Wspd',
}, inplace=True)
# Generate index from Date (DD.HH.YYYY) and Time
data.index = pd.to_datetime(data.Date + ' ' + data.Time,
dayfirst=True, utc=True,
infer_datetime_format = True)
return data, metadata
def getSingleTimestampTrackerAngle(self, metdata, timeindex, gcr=None,
azimuth=180, axis_tilt=0,
limit_angle=45, backtrack=True):
"""
Helper function to calculate a tracker's angle for use with the
fixed tilt routines of bifacial_radiance. It calculates tracker angle for
sun position at the timeindex passed (no left or right time offset,
label = 'center')
Parameters
----------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meterological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
timeindex : int
Index between 0 to 8760 indicating hour to simulate.
gcr : float
Ground coverage ratio used for the backtracking calculation. Default [1.0/3.0]
azimuth : float or int
Orientation axis of tracker torque tube. Default North-South (180 deg)
axis_tilt : float or int
Default 0. Axis tilt is not implemented in sensor locations, so changing it
has no effect at this release.
limit_angle : float or int
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
backtrack : boolean
Whether backtracking is enabled (default = True)
"""
'''
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
timestamp = metdata.datetime[timeindex]
'''
import pvlib
solpos = metdata.solpos.iloc[timeindex]
sunzen = float(solpos.apparent_zenith)
sunaz = float(solpos.azimuth) # not substracting the 180
trackingdata = pvlib.tracking.singleaxis(sunzen, sunaz,
axis_tilt, azimuth,
limit_angle, backtrack, gcr)
tracker_theta = float(np.round(trackingdata['tracker_theta'],2))
tracker_theta = tracker_theta*-1 # bifacial_radiance uses East (morning) theta as positive
return tracker_theta
def gendaylit(self, timeindex, metdata=None, debug=False):
"""
Sets and returns sky information using gendaylit.
Uses PVLIB for calculating the sun position angles instead of
using Radiance internal sun position calculation (for that use gendaylit function)
Parameters
----------
timeindex : int
Index from 0 to ~4000 of the MetObj (daylight hours only)
metdata : ``MetObj``
MetObj object with list of dni, dhi, ghi and location
debug : bool
Flag to print output of sky DHI and DNI
Returns
-------
skyname : str
Sets as a self.skyname and returns filename of sky in /skies/ directory.
If errors exist, such as DNI = 0 or sun below horizon, this skyname is None
"""
import warnings
if metdata is None:
try:
metdata = self.metdata
except:
print('usage: pass metdata, or run after running ' +
'readWeatherFile() ')
return
ground = self.ground
locName = metdata.city
dni = metdata.dni[timeindex]
dhi = metdata.dhi[timeindex]
ghi = metdata.ghi[timeindex]
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
# Assign Albedos
try:
if ground.ReflAvg.shape == metdata.dni.shape:
groundindex = timeindex
elif self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
warnings.warn("Shape of ground Albedos and TMY data do not match.")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
if debug is True:
print('Sky generated with Gendaylit, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
print("Datetime TimeIndex", metdata.datetime[timeindex])
#Time conversion to correct format and offset.
#datetime = metdata.sunrisesetdata['corrected_timestamp'][timeindex]
#Don't need any of this any more. Already sunrise/sunset corrected and offset by appropriate interval
# get solar position zenith and azimuth based on site metadata
#solpos = pvlib.irradiance.solarposition.get_solarposition(datetimetz,lat,lon,elev)
solpos = metdata.solpos.iloc[timeindex]
sunalt = float(solpos.elevation)
# Radiance expects azimuth South = 0, PVlib gives South = 180. Must subtract 180 to match.
sunaz = float(solpos.azimuth)-180.0
sky_path = 'skies'
if dhi <= 0:
self.skyfiles = [None]
return None
# We should already be filtering for elevation >0. But just in case...
if sunalt <= 0:
sunalt = np.arcsin((ghi-dhi)/(dni+.001))*180/np.pi # reverse engineer elevation from ghi, dhi, dni
print('Warning: negative sun elevation at '+
'{}. '.format(metdata.datetime[timeindex])+
'Re-calculated elevation: {:0.2}'.format(sunalt))
# Note - -W and -O1 option is used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# location name: " + str(locName) + " LAT: " + str(lat)
+" LON: " + str(lon) + " Elev: " + str(elev) + "\n"
"# Sun position calculated w. PVLib\n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
ground._makeGroundString(index=groundindex, cumulativesky=False)
time = metdata.datetime[timeindex]
#filename = str(time)[2:-9].replace('-','_').replace(' ','_').replace(':','_')
filename = time.strftime('%Y-%m-%d_%H%M')
skyname = os.path.join(sky_path,"sky2_%s_%s_%s.rad" %(lat, lon, filename))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
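# Illustrative usage sketch (hypothetical object names): with a RadianceObj
# `demo` that has already run readWeatherFile() and setGround(), an hourly sky
# can be generated for one timestep of the filtered metdata:
#
#   skyname = demo.gendaylit(timeindex=4020, debug=True)
#   if skyname is None:
#       print('No sky created (DHI <= 0 for this timestep)')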
def gendaylit2manual(self, dni, dhi, sunalt, sunaz):
"""
Sets and returns sky information using gendaylit.
Uses user-provided data for sun position and irradiance.
.. warning::
This generates the sky at the sun altitude&azimuth provided, make
sure it is the right position relative to how the weather data got
created and read (i.e. label right, left or center).
Parameters
------------
dni: int or float
Direct Normal Irradiance (DNI) value, in W/m^2
dhi : int or float
Diffuse Horizontal Irradiance (DHI) value, in W/m^2
sunalt : int or float
Sun altitude (degrees)
sunaz : int or float
Sun azimuth (degrees)
Returns
-------
skyname : string
Filename of sky in /skies/ directory
"""
print('Sky generated with Gendaylit 2 MANUAL, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
sky_path = 'skies'
if sunalt <= 0 or dhi <= 0:
self.skyfiles = [None]
return None
# Assign Albedos
try:
if self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
print("Ambiguous albedo entry, Set albedo to single value "
"in setGround()")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
# Note: -W and -O1 are used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# Manual inputs of DNI, DHI, SunAlt and SunAZ into Gendaylit used \n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, self.ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
self.ground._makeGroundString(index=groundindex, cumulativesky=False)
skyname = os.path.join(sky_path, "sky2_%s.rad" %(self.name))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
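# Illustrative usage sketch (hypothetical values): generating a sky from
# user-supplied irradiance and sun position. Note that `sunaz` is passed
# straight to gendaylit -ang, so it must already be in the convention
# gendaylit expects (unlike gendaylit(), no 180-degree shift is applied here):
#
#   skyname = demo.gendaylit2manual(dni=700, dhi=100, sunalt=35, sunaz=0)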
def genCumSky(self, gencumsky_metfile=None, savefile=None):
"""
Generate Skydome using gencumsky.
.. warning::
gencumulativesky.exe is required to be installed,
which is not a standard radiance distribution.
You can find the program in the bifacial_radiance distribution directory
in \Lib\site-packages\bifacial_radiance\data
Use :func:`readWeatherFile(filename, starttime='YYYY-mm-dd_HHMM', endtime='YYYY-mm-dd_HHMM')`
to limit gencumsky simulations instead.
Parameters
------------
gencumsky_metfile : str
Filename with path to the temporary meteorological file, usually created
in the EPWs folder. This csv file has no headers, no index, and two
space-separated columns with values for GHI and DNI for each hour
in the year, and MUST be 8760 entries long, otherwise gencumulativesky.exe fails.
savefile : string
If savefile is None, defaults to "cumulative"
Returns
--------
skyname : str
Filename of the .rad file containing cumulativesky info
"""
# TODO: error checking and auto-install of gencumulativesky.exe
# TODO: add check if readWeatherFile has not been done
# TODO: check if it fails if gcc module has been loaded? (common hpc issue)
#import datetime
if gencumsky_metfile is None:
gencumsky_metfile = self.gencumsky_metfile
if isinstance(gencumsky_metfile, str):
print("Loaded ", gencumsky_metfile)
if isinstance(gencumsky_metfile, list):
print("There are more than 1 year of gencumsky temporal weather file saved."+
"You can pass which file you want with gencumsky_metfile input. Since "+
"No year was selected, defaulting to using the first year of the list")
gencumsky_metfile = gencumsky_metfile[0]
print("Loaded ", gencumsky_metfile)
if savefile is None:
savefile = "cumulative"
sky_path = 'skies'
lat = self.metdata.latitude
lon = self.metdata.longitude
timeZone = self.metdata.timezone
'''
cmd = "gencumulativesky +s1 -h 0 -a %s -o %s -m %s %s " %(lat, lon, float(timeZone)*15, filetype) +\
"-time %s %s -date %s %s %s %s %s" % (startdt.hour, enddt.hour+1,
startdt.month, startdt.day,
enddt.month, enddt.day,
gencumsky_metfile)
'''
cmd = (f"gencumulativesky +s1 -h 0 -a {lat} -o {lon} -m "
f"{float(timeZone)*15} -G {gencumsky_metfile}" )
with open(savefile+".cal","w") as f:
_,err = _popen(cmd, None, f)
if err is not None:
print(err)
# Assign Albedos
try:
groundstring = self.ground._makeGroundString(cumulativesky=True)
except:
raise Exception('Error: ground reflection not defined. '
'Run RadianceObj.setGround() first')
return
skyStr = "#Cumulative Sky Definition\n" +\
"void brightfunc skyfunc\n" + \
"2 skybright " + "%s.cal\n" % (savefile) + \
"0\n" + \
"0\n" + \
"\nskyfunc glow sky_glow\n" + \
"0\n" + \
"0\n" + \
"4 1 1 1 0\n" + \
"\nsky_glow source sky\n" + \
"0\n" + \
"0\n" + \
"4 0 0 1 180\n" + \
groundstring
skyname = os.path.join(sky_path, savefile+".rad")
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]#, 'SunFile.rad' ]
return skyname
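# Illustrative usage sketch: for a cumulative-sky (annual or date-limited)
# workflow, readWeatherFile() writes the temporary gencumsky met file that is
# picked up automatically here, so the call can be as simple as:
#
#   skyname = demo.genCumSky()           # uses self.gencumsky_metfile
#   # or with an explicit file and output prefix (hypothetical path):
#   skyname = demo.genCumSky(gencumsky_metfile='EPWs/metdata_temp.csv',
#                            savefile='cumulative')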
def set1axis(self, metdata=None, azimuth=180, limit_angle=45,
angledelta=5, backtrack=True, gcr=1.0 / 3, cumulativesky=True,
fixed_tilt_angle=None, useMeasuredTrackerAngle=False,
axis_azimuth=None):
"""
Set up geometry for 1-axis tracking. Pull in tracking angle details from
pvlib, create multiple 8760 metdata sub-files where datetime of met data
matches the tracking angle. Returns 'trackerdict' which has keys equal to
either the tracker angles (gencumsky workflow) or timestamps (gendaylit hourly
workflow)
Parameters
------------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meteorological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
azimuth : numeric
Orientation axis of tracker torque tube. Default North-South (180 deg).
For fixed-tilt configuration, input is fixed azimuth (180 is south)
limit_angle : numeric
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
angledelta : numeric
Degree of rotation increment to parse irradiance bins. Default 5 degrees.
(0.4 % error for DNI). Other options: 4 (.25%), 2.5 (0.1%).
Note: the smaller the angledelta, the more simulations must be run.
backtrack : bool
Whether backtracking is enabled (default = True)
gcr : float
Ground coverage ratio used for the backtracking calculation. Default [1.0/3.0]
cumulativesky : bool
[True] Whether individual csv files are
created with a constant tilt angle for the cumulativesky approach.
If False, the gendaylit tracking approach must be used.
fixed_tilt_angle : numeric
If passed, this changes to a fixed tilt simulation where each hour
uses fixed_tilt_angle and axis_azimuth as the tilt and azimuth
useMeasuredTrackerAngle: Bool
If True, and data for tracker angles has been passed by being included
in the WeatherFile object (column name 'Tracker Angle (degrees)'),
then tracker angles will be set to these values instead of being calculated.
NOTE that the value for azimuth passed to set1axis must be surface
azimuth in the morning and not the axis_azimuth
(i.e. for a N-S HSAT, azimuth = 90).
axis_azimuth : numeric
DEPRECATED. returns deprecation warning. Pass the tracker
axis_azimuth through to azimuth input instead.
Returns
-------
trackerdict : dictionary
Keys represent tracker tilt angles (gencumsky) or timestamps (gendaylit)
and list of csv metfile, and datetimes at that angle
trackerdict[angle]['csvfile';'surf_azm';'surf_tilt';'UTCtime']
- or -
trackerdict[time]['tracker_theta';'surf_azm';'surf_tilt']
"""
# Documentation check:
# Removed Internal variables
# -------
# metdata.solpos dataframe with solar position data
# metdata.surface_azimuth list of tracker azimuth data
# metdata.surface_tilt list of tracker surface tilt data
# metdata.tracker_theta list of tracker tilt angle
import warnings
if metdata is None:
metdata = self.metdata
if metdata == {}:
raise Exception("metdata doesn't exist yet. "+
"Run RadianceObj.readWeatherFile() ")
if axis_azimuth:
azimuth = axis_azimuth
warnings.warn("axis_azimuth is deprecated in set1axis; use azimuth "
"input instead.", DeprecationWarning)
#backtrack = True # include backtracking support in later version
#gcr = 1.0/3.0 # default value - not used if backtrack = False.
# get 1-axis tracker angles for this location, rounded to nearest 'angledelta'
trackerdict = metdata._set1axis(cumulativesky=cumulativesky,
azimuth=azimuth,
limit_angle=limit_angle,
angledelta=angledelta,
backtrack=backtrack,
gcr=gcr,
fixed_tilt_angle=fixed_tilt_angle,
useMeasuredTrackerAngle=useMeasuredTrackerAngle
)
self.trackerdict = trackerdict
self.cumulativesky = cumulativesky
return trackerdict
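# Illustrative usage sketch: setting up 1-axis tracking geometry. For the
# hourly gendaylit workflow set cumulativesky=False; for the binned
# gencumsky workflow leave it True (the default):
#
#   trackerdict = demo.set1axis(limit_angle=60, backtrack=True,
#                               gcr=0.35, cumulativesky=False)
#   # keys are timestamps (gendaylit) or tilt angles (gencumsky)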
def gendaylit1axis(self, metdata=None, trackerdict=None, startdate=None,
enddate=None, debug=False):
"""
1-axis tracking implementation of gendaylit.
Creates multiple sky files, one for each time of day.
Parameters
------------
metdata
MetObj output from readWeatherFile. Needs to have
RadianceObj.set1axis() run on it first.
startdate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
enddate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
trackerdict : dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
Returns
-------
Updated trackerdict dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
with the additional dictionary value ['skyfile'] added
"""
if metdata is None:
metdata = self.metdata
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if startdate is not None or enddate is not None:
print("Deprecation Warning: gendyalit1axis no longer downselects"+
" entries by stardate and enddate. Downselect your data"+
" when loading with readWeatherFile")
return
try:
metdata.tracker_theta # this may not exist
except AttributeError:
print("metdata.tracker_theta doesn't exist. Run RadianceObj.set1axis() first")
if debug is False:
print('Creating ~%d skyfiles. '%(len(trackerdict.keys())))
count = 0 # counter to get number of skyfiles created, just for giggles
trackerdict2={}
for i in range(0, len(trackerdict.keys())):
try:
time = metdata.datetime[i]
except IndexError: #out of range error
break #
#filename = str(time)[5:-12].replace('-','_').replace(' ','_')
filename = time.strftime('%Y-%m-%d_%H%M')
self.name = filename
#check for GHI > 0
#if metdata.ghi[i] > 0:
if (metdata.ghi[i] > 0) & (~np.isnan(metdata.tracker_theta[i])):
skyfile = self.gendaylit(metdata=metdata,timeindex=i, debug=debug)
# trackerdict2 reduces the dict to only the range specified.
trackerdict2[filename] = trackerdict[filename]
trackerdict2[filename]['skyfile'] = skyfile
count +=1
print('Created {} skyfiles in /skies/'.format(count))
self.trackerdict = trackerdict2
return trackerdict2
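# Illustrative usage sketch (continuing the hourly workflow): after
# set1axis(cumulativesky=False), one sky file per sun-up timestep is created:
#
#   trackerdict = demo.gendaylit1axis()
#   # each entry now carries trackerdict[time]['skyfile']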
def genCumSky1axis(self, trackerdict=None):
"""
1-axis tracking implementation of gencumulativesky.
Creates multiple .cal files and .rad files, one for each tracker angle.
Use :func:`readWeatherFile` to limit gencumsky simulations
Parameters
------------
trackerdict : dictionary
Trackerdict generated as output by RadianceObj.set1axis()
Returns
-------
trackerdict : dictionary
Trackerdict dictionary with new entry trackerdict.skyfile
Appends 'skyfile' to the 1-axis dict with the location of the sky .radfile
"""
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
for theta in sorted(trackerdict):
# call gencumulativesky with a new .cal and .rad name
csvfile = trackerdict[theta]['csvfile']
savefile = '1axis_%s'%(theta) #prefix for .cal file and skies\*.rad file
skyfile = self.genCumSky(gencumsky_metfile=csvfile, savefile=savefile)
trackerdict[theta]['skyfile'] = skyfile
print('Created skyfile %s'%(skyfile))
# delete default skyfile (not strictly necessary)
self.skyfiles = None
self.trackerdict = trackerdict
return trackerdict
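# Illustrative usage sketch (continuing the cumulativesky workflow): after
# set1axis(cumulativesky=True), one cumulative sky is generated per tracker
# angle bin:
#
#   trackerdict = demo.genCumSky1axis()
#   # each entry now carries trackerdict[theta]['skyfile']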
def makeOct(self, filelist=None, octname=None):
"""
Combine everything together into a .oct file
Parameters
----------
filelist : list
Files to include. otherwise takes self.filelist
octname : str
filename (without .oct extension)
Returns
-------
octname : str
filename of .oct file in root directory including extension
err : str
Error message returned from oconv (if any)
"""
if filelist is None:
filelist = self.getfilelist()
if octname is None:
octname = self.name
debug = False
#JSS. With the way the break is handled now, this will wait the full 10 seconds
# for every hour whose sky file was not generated.
if self.hpc :
import time
time_to_wait = 10
time_counter = 0
for file in filelist:
if debug:
print("HPC Checking for file %s" % (file))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
# Sky file is being saved as 'none', so it crashes!
while not os.path.exists(file):
time.sleep(1)
time_counter += 1
if time_counter > time_to_wait:
print ("filenotfound")
break
#os.system('oconv '+ ' '.join(filelist) + ' > %s.oct' % (octname))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
#cmd = 'oconv ' + ' '.join(filelist)
filelist.insert(0,'oconv')
with open('%s.oct' % (octname), "w") as f:
_,err = _popen(filelist, None, f)
#TODO: exception handling for no sun up
if err is not None:
if err[0:5] == 'error':
raise Exception(err[7:])
if err[0:7] == 'message':
warnings.warn(err[9:], Warning)
#use rvu to see if everything looks good.
# use cmd for this since it locks out the terminal.
#'rvu -vf views\side.vp -e .01 monopanel_test.oct'
print("Created %s.oct" % (octname))
self.octfile = '%s.oct' % (octname)
return '%s.oct' % (octname)
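# Illustrative usage sketch: for a fixed-tilt scene, once sky, scene and
# material files exist, they are combined with oconv into a single octree:
#
#   octfile = demo.makeOct(filelist=demo.getfilelist(), octname='demo_scene')
#   # octfile is 'demo_scene.oct' in the root simulation folder, or None on error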
def makeOct1axis(self, trackerdict=None, singleindex=None, customname=None):
"""
Combine files listed in trackerdict into multiple .oct files
Parameters
------------
trackerdict
Output from :py:class:`~bifacial_radiance.RadianceObj.makeScene1axis`
singleindex : str
Single index for trackerdict to run makeOct1axis in single-value mode,
format 'YYYY-MM-DD_HHMM'.
customname : str
Custom text string added to the end of the OCT file name.
Returns
-------
trackerdict
Append 'octfile' to the 1-axis dict with the location of the scene .octfile
"""
if customname is None:
customname = ''
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # loop through all values in the tracker dictionary
indexlist = trackerdict.keys()
else: # just loop through one single index in tracker dictionary
indexlist = [singleindex]
print('\nMaking {} octfiles in root directory.'.format(indexlist.__len__()))
for index in sorted(indexlist): # run through either entire key list of trackerdict, or just a single value
try:
filelist = self.materialfiles + [trackerdict[index]['skyfile'], trackerdict[index]['radfile']]
octname = '1axis_%s%s'%(index, customname)
trackerdict[index]['octfile'] = self.makeOct(filelist, octname)
except KeyError as e:
print('Trackerdict key error: {}'.format(e))
return trackerdict
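# Illustrative usage sketch: building one octree per trackerdict entry, or a
# single one in single-index mode:
#
#   trackerdict = demo.makeOct1axis(trackerdict)                    # all entries
#   trackerdict = demo.makeOct1axis(trackerdict,
#                                   singleindex='2021-06-17_1200')  # one timestamp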
def makeModule(self, name=None, x=None, y=None, z=None, modulefile=None,
text=None, customtext='', xgap=0.01, ygap=0.0,
zgap=0.1, numpanels=1, rewriteModulefile=True,
glass=False, modulematerial=None, bifi=1, **kwargs):
"""
pass module generation details into ModuleObj(). See ModuleObj()
docstring for more details
"""
from bifacial_radiance import ModuleObj
if name is None:
print("usage: makeModule(name,x,y,z, modulefile = '\objects\*.rad', "+
" zgap = 0.1 (module offset)"+
"numpanels = 1 (# of panels in portrait), ygap = 0.05 "+
"(slope distance between panels when arrayed), "+
"rewriteModulefile = True (or False), bifi = 1")
print("You can also override module_type info by passing 'text'"+
"variable, or add on at the end for racking details with "+
"'customtext'. See function definition for more details")
print("Optional: tubeParams={} (torque tube details including "
"diameter (torque tube dia. in meters), tubetype='Round' "
"(or 'square', 'hex'), material='Metal_Grey' (or 'black')"
", axisofrotation=True (does scene rotate around tube)")
print("Optional: cellModule={} (create cell-level module by "+
" passing in dictionary with keys 'numcellsx'6 (#cells in "+
"X-dir.), 'numcellsy', 'xcell' (cell size in X-dir. in meters),"+
"'ycell', 'xcellgap' (spacing between cells in X-dir.), 'ycellgap'")
print("Optional: omegaParams={} (create the support structure omega by "+
"passing in dictionary with keys 'omega_material' (the material of "+
"omega), 'mod_overlap'(the length of the module adjacent piece of"+
" omega that overlaps with the module),'x_omega1', 'y_omega' (ideally same"+
" for all the parts of omega),'z_omega1', 'x_omega2' (X-dir length of the"+
" vertical piece), 'x_omega3', z_omega3")
return
"""
# TODO: check for deprecated torquetube and axisofrotationTorqueTube in
kwargs.
"""
if 'tubeParams' in kwargs:
tubeParams = kwargs.pop('tubeParams')
else:
tubeParams = None
if 'torquetube' in kwargs:
torquetube = kwargs.pop('torquetube')
print("\nWarning: boolean input `torquetube` passed into makeModule"
". Starting in v0.4.0 this boolean parameter is deprecated."
" Use module.addTorquetube() with `visible` parameter instead.")
if tubeParams:
tubeParams['visible'] = torquetube
elif (tubeParams is None) & (torquetube is True):
tubeParams = {'visible':True} # create default TT
if 'axisofrotationTorqueTube' in kwargs:
axisofrotation = kwargs.pop('axisofrotationTorqueTube')
print("\nWarning: input boolean `axisofrotationTorqueTube` passed "
"into makeModule. Starting in v0.4.0 this boolean parameter is"
" deprecated. Use module.addTorquetube() with `axisofrotation`"
"parameter instead.")
if tubeParams: #this kwarg only does something if there's a TT.
tubeParams['axisofrotation'] = axisofrotation
if self.hpc: # trigger HPC simulation in ModuleObj
kwargs['hpc']=True
self.module = ModuleObj(name=name, x=x, y=y, z=z, bifi=bifi, modulefile=modulefile,
text=text, customtext=customtext, xgap=xgap, ygap=ygap,
zgap=zgap, numpanels=numpanels,
rewriteModulefile=rewriteModulefile, glass=glass,
modulematerial=modulematerial, tubeParams=tubeParams,
**kwargs)
return self.module
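# Illustrative usage sketch (hypothetical module name and dimensions):
#
#   module = demo.makeModule(name='test-module', x=1.0, y=2.0,
#                            numpanels=2, ygap=0.05, zgap=0.10)
#   # returns a ModuleObj, also stored as demo.module for later makeScene calls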
def makeCustomObject(self, name=None, text=None):
"""
Function for development and experimenting with extraneous objects in the scene.
This function creates a `name.rad` textfile in the objects folder
with whatever text that is passed to it.
It is up to the user to pass the correct radiance format.
For example, to create a box at coordinates 0,0 (with its bottom surface
on the plane z=0):
.. code-block:
name = 'box'
text='! genbox black PVmodule 0.5 0.5 0.5 | xform -t -0.25 -0.25 0'
Parameters
----------
name : str
String input to name the module type
text : str
Text used in the radfile to generate the module
"""
customradfile = os.path.join('objects', '%s.rad'%(name)) # update in 0.2.3 to shorten radnames
# py2 and 3 compatible: binary write, encode text first
with open(customradfile, 'wb') as f:
f.write(text.encode('ascii'))
print("\nCustom Object Name", customradfile)
self.customradfile = customradfile
return customradfile
def printModules(self):
# print available module types from ModuleObj
from bifacial_radiance import ModuleObj
modulenames = ModuleObj().readModule()
print('Available module names: {}'.format([str(x) for x in modulenames]))
return modulenames
def makeScene(self, module=None, sceneDict=None, radname=None,
moduletype=None):
"""
Create a SceneObj which contains details of the PV system configuration including
tilt, row pitch, height, nMods per row, nRows in the system...
Parameters
----------
module : str or ModuleObj
String name of module created with makeModule()
sceneDict : dictionary
Dictionary with keys: `tilt`, `clearance_height`*, `pitch`,
`azimuth`, `nMods`, `nRows`, `hub_height`*, `height`*
* height deprecated from sceneDict. For makeScene (fixed systems),
if passed it is assumed it refers to clearance_height.
`clearance_height` recommended for fixed-tilt systems.
`hub_height` can also be passed as a possibility.
radname : str
Gives a custom name to the scene file. Useful when parallelizing.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
-------
SceneObj
'scene' with configuration details
"""
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('makeScene(module, sceneDict, nMods, nRows). '+\
'Available moduletypes: ' )
self.printModules() #print available module types
return
self.scene = SceneObj(module)
self.scene.hpc = self.hpc #pass HPC mode from parent
if sceneDict is None:
print('makeScene(moduletype, sceneDict, nMods, nRows). '+\
'sceneDict inputs: .tilt .clearance_height .pitch .azimuth')
return self.scene
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
# Fixed tilt routine
# Preferred: clearance_height,
# If only height is passed, it is assumed to be clearance_height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='clearance_height',
nonpreferred='hub_height')
self.nMods = sceneDict['nMods']
self.nRows = sceneDict['nRows']
self.sceneRAD = self.scene._makeSceneNxR(sceneDict=sceneDict,
radname=radname)
if 'appendRadfile' not in sceneDict:
appendRadfile = False
else:
appendRadfile = sceneDict['appendRadfile']
if appendRadfile:
debug = False
try:
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDED!")
except:
#TODO: Manage situation where radfile was created with
#appendRadfile to False first..
self.radfiles=[]
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDAGE created!")
else:
self.radfiles = [self.sceneRAD]
return self.scene
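# Illustrative usage sketch (hypothetical geometry values): for a fixed-tilt
# system, clearance_height is the preferred height key:
#
#   sceneDict = {'tilt': 10, 'pitch': 5.0, 'clearance_height': 0.5,
#                'azimuth': 180, 'nMods': 20, 'nRows': 7}
#   scene = demo.makeScene(module=module, sceneDict=sceneDict)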
def appendtoScene(self, radfile=None, customObject=None, text=''):
"""
Appends a user-created text command, in Radiance lingo, to the scene
radfile in the `\objects` folder.
Useful when using addCustomObject to the scene.
Parameters
----------
radfile: str
Directory and name of where the .rad scene file is stored
customObject : str
Directory and name of where the custom object .rad file is stored
text : str
Command to be appended to the radfile. Do not leave empty spaces
at the end.
Returns
-------
Nothing, the radfile must already be created and assigned when running this.
"""
#TODO: Add a custom name and replace radfile name
# py2 and 3 compatible: binary write, encode text first
text2 = '\n' + text + ' ' + customObject
debug = False
if debug:
print (text2)
with open(radfile, 'a+') as f:
f.write(text2)
def makeScene1axis(self, trackerdict=None, module=None, sceneDict=None,
cumulativesky=None, moduletype=None):
"""
Creates a SceneObj for each tracking angle which contains details of the PV
system configuration including row pitch, hub_height, nMods per row, nRows in the system...
Parameters
------------
trackerdict
Output from GenCumSky1axis
module : str or ModuleObj
Name or ModuleObj created with makeModule()
sceneDict :
Dictionary with keys:`tilt`, `hub_height`, `pitch`, `azimuth`
cumulativesky : bool
Defines if sky will be generated with cumulativesky or gendaylit.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
--------
trackerdict
Append the following keys
'radfile'
directory where .rad scene file is stored
'scene'
SceneObj for each tracker theta
'clearance_height'
Calculated ground clearance based on
`hub height`, `tilt` angle and overall collector width `sceney`
"""
import math
if sceneDict is None:
print('usage: makeScene1axis(module, sceneDict, nMods, nRows).'+
'sceneDict inputs: .hub_height .azimuth .nMods .nRows'+
'and .pitch or .gcr')
return
# If no nRows or nMods assigned on deprecated variable or dictionary,
# assign default.
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if cumulativesky is None:
try:
# see if cumulativesky = False was set earlier,
# e.g. in RadianceObj.set1axis
cumulativesky = self.cumulativesky
except AttributeError:
# default cumulativesky = true to maintain backward compatibility.
cumulativesky = True
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('usage: makeScene1axis(trackerdict, module, '+
'sceneDict, nMods, nRows). ')
self.printModules() #print available module types
return
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
# 1axis routine
# Preferred hub_height
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight:
simplefix = 0
hubheight = sceneDict['clearance_height'] # Not really the hub height, but this is the fastest
# way to make it work with simplefix: below, the actual clearance height
# gets calculated, and simplefix = 0 zeroes out the cosine correction.
# TODO: CLEAN THIS UP.
else:
#the hub height is the tracker height at center of rotation.
hubheight = sceneDict['hub_height']
simplefix = 1
if cumulativesky is True: # cumulativesky workflow
print('\nMaking .rad files for cumulativesky 1-axis workflow')
for theta in trackerdict:
scene = SceneObj(module)
if trackerdict[theta]['surf_azm'] >= 180:
trackerdict[theta]['surf_azm'] = trackerdict[theta]['surf_azm']-180
trackerdict[theta]['surf_tilt'] = trackerdict[theta]['surf_tilt']*-1
radname = '1axis%s_'%(theta,)
# Calculating clearance height for this theta.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
# Calculate the ground clearance height based on the hub height. Add abs(theta) to avoid negative tilt angle errors
trackerdict[theta]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed, not pitch
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[theta]['radfile'] = radfile
trackerdict[theta]['scene'] = scene
print('{} Radfiles created in /objects/'.format(trackerdict.__len__()))
else: #gendaylit workflow
print('\nMaking ~%s .rad files for gendaylit 1-axis workflow (this takes a minute..)' % (len(trackerdict)))
count = 0
for time in trackerdict:
scene = SceneObj(module)
if trackerdict[time]['surf_azm'] >= 180:
trackerdict[time]['surf_azm'] = trackerdict[time]['surf_azm']-180
trackerdict[time]['surf_tilt'] = trackerdict[time]['surf_tilt']*-1
theta = trackerdict[time]['theta']
radname = '1axis%s_'%(time,)
# Calculating clearance height for this time.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
if trackerdict[time]['ghi'] > 0:
trackerdict[time]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed instead of pitch
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[time]['radfile'] = radfile
trackerdict[time]['scene'] = scene
count+=1
print('{} Radfiles created in /objects/'.format(count))
self.trackerdict = trackerdict
self.nMods = sceneDict['nMods'] #assign nMods and nRows to RadianceObj
self.nRows = sceneDict['nRows']
self.hub_height = hubheight
return trackerdict
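# Illustrative usage sketch (hypothetical geometry values): for the 1-axis
# workflows, hub_height is the preferred height key and either pitch or gcr
# may be given:
#
#   sceneDict = {'hub_height': 1.5, 'pitch': 5.0, 'nMods': 20, 'nRows': 7}
#   trackerdict = demo.makeScene1axis(trackerdict=trackerdict,
#                                     module=module, sceneDict=sceneDict)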
def analysis1axis(self, trackerdict=None, singleindex=None, accuracy='low',
customname=None, modWanted=None, rowWanted=None,
sensorsy=9, sensorsx=1,
modscanfront = None, modscanback = None, relative=False,
debug=False ):
"""
Loop through trackerdict and runs linescans for each scene and scan in there.
Parameters
----------------
trackerdict
singleindex : str
For single-index mode, just the one index we want to run (new in 0.2.3).
Example format '21_06_14_12_30' for 2021 June 14th 12:30 pm
accuracy : str
'low' or 'high', resolution option used during _irrPlot and rtrace
customname : str
Custom text string to be added to the file name for the results .CSV files
modWanted : int
Module to be sampled. Index starts at 1.
rowWanted : int
Row to be sampled. Index starts at 1. (row 1)
sensorsy : int or list
Number of 'sensors' or scanning points along the collector width
(CW) of the module(s). If multiple values are passed, first value
represents number of front sensors, second value is number of back sensors
sensorsx : int or list
Number of 'sensors' or scanning points along the length, the side perpendicular
to the collector width (CW) of the module(s) for the back side of the module.
If multiple values are passed, first value represents number of
front sensors, second value is number of back sensors.
modscanfront : dict
dictionary with one or more of the following key: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated frontscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to also modify them in modscanback to avoid issues at the
results-writing stage.
modscanback : dict
dictionary with one or more of the following key: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated backscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to also modify them in modscanfront to avoid issues at the
results-writing stage.
relative : Bool
if passing modscanfront and modscanback to modify the dictionaries of positions,
this sets whether the values passed are relative or absolute.
Default is absolute (relative=False)
debug : Bool
Activates internal printing of the function to help debugging.
Returns
-------
trackerdict with new keys:
'AnalysisObj' : analysis object for this tracker theta
'Wm2Front' : list of front Wm-2 irradiances, len=sensorsy_back
'Wm2Back' : list of rear Wm-2 irradiances, len=sensorsy_back
'backRatio' : list of rear irradiance ratios, len=sensorsy_back
RadianceObj with new appended values:
'Wm2Front' : np Array with front irradiance cumulative
'Wm2Back' : np Array with rear irradiance cumulative
'backRatio' : np Array with rear irradiance ratios
"""
import warnings
if customname is None:
customname = ''
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # run over all values in trackerdict
trackerkeys = sorted(trackerdict.keys())
else: # run in single index mode.
trackerkeys = [singleindex]
if modWanted == None:
modWanted = round(self.nMods / 1.99)
if rowWanted == None:
rowWanted = round(self.nRows / 1.99)
frontWm2 = 0 # container for tracking front irradiance across module chord. Dynamically size based on first analysis run
backWm2 = 0 # container for tracking rear irradiance across module chord.
for index in trackerkeys: # either full list of trackerdict keys, or single index
name = '1axis_%s%s'%(index,customname)
octfile = trackerdict[index]['octfile']
scene = trackerdict[index]['scene']
if octfile is None:
continue # don't run analysis if the octfile is none
try: # look for missing data
analysis = AnalysisObj(octfile,name)
name = '1axis_%s%s'%(index,customname,)
frontscanind, backscanind = analysis.moduleAnalysis(scene=scene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
analysis.analysis(octfile=octfile,name=name,frontscan=frontscanind,backscan=backscanind,accuracy=accuracy)
trackerdict[index]['AnalysisObj'] = analysis
except Exception as e: # problem with file. TODO: only catch specific error types here.
warnings.warn('Index: {}. Problem with file. Error: {}. Skipping'.format(index,e), Warning)
return
#combine cumulative front and back irradiance for each tracker angle
try: #on error, trackerdict[index] is returned empty
trackerdict[index]['Wm2Front'] = analysis.Wm2Front
trackerdict[index]['Wm2Back'] = analysis.Wm2Back
trackerdict[index]['backRatio'] = analysis.backRatio
except AttributeError as e: # no key Wm2Front.
warnings.warn('Index: {}. Trackerdict key not found: {}. Skipping'.format(index,e), Warning)
return
if np.sum(frontWm2) == 0: # define frontWm2 the first time through
frontWm2 = np.array(analysis.Wm2Front)
backWm2 = np.array(analysis.Wm2Back)
else:
frontWm2 += np.array(analysis.Wm2Front)
backWm2 += np.array(analysis.Wm2Back)
print('Index: {}. Wm2Front: {}. Wm2Back: {}'.format(index,
np.mean(analysis.Wm2Front), np.mean(analysis.Wm2Back)))
if np.sum(self.Wm2Front) == 0:
self.Wm2Front = frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back = backWm2
else:
self.Wm2Front += frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back += backWm2
self.backRatio = np.mean(backWm2)/np.mean(frontWm2+.001)
# Save compiled results using _saveresults
if singleindex is None:
print ("Saving a cumulative-results file in the main simulation folder." +
"This adds up by sensor location the irradiance over all hours " +
"or configurations considered." +
"\nWarning: This file saving routine does not clean results, so "+
"if your setup has ygaps, or 2+modules or torque tubes, doing "+
"a deeper cleaning and working with the individual results "+
"files in the results folder is highly suggested.")
cumfilename = 'cumulative_results_%s.csv'%(customname)
if self.cumulativesky is True:
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
temptrackerdict = trackerdict[list(trackerdict)[0]]['AnalysisObj']
#temptrackerdict = trackerdict[0.0]['AnalysisObj']
frontcum ['x'] = temptrackerdict.x
frontcum ['y'] = temptrackerdict.y
frontcum ['z'] = temptrackerdict.z
frontcum ['mattype'] = temptrackerdict.mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = temptrackerdict.x
rearcum ['y'] = temptrackerdict.y
rearcum ['z'] = temptrackerdict.rearZ
rearcum ['mattype'] = temptrackerdict.rearMat
rearcum ['Wm2'] = self.Wm2Back
cumanalysisobj = AnalysisObj()
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
else: # trackerkeys are day/hour/min, and there's no easy way to find a
# tilt of 0, so making a fake linepoint object for tilt 0
# and then saving.
try:
cumscene = trackerdict[trackerkeys[0]]['scene']
cumscene.sceneDict['tilt']=0
cumscene.sceneDict['clearance_height'] = self.hub_height
cumanalysisobj = AnalysisObj()
frontscancum, backscancum = cumanalysisobj.moduleAnalysis(scene=cumscene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
x,y,z = cumanalysisobj._linePtsArray(frontscancum)
x,y,rearz = cumanalysisobj._linePtsArray(backscancum)
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
frontcum ['x'] = x
frontcum ['y'] = y
frontcum ['z'] = z
frontcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = x
rearcum ['y'] = y
rearcum ['z'] = rearz
rearcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].rearMat
rearcum ['Wm2'] = self.Wm2Back
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
except:
print("Not able to save a cumulative result for this simulation.")
return trackerdict
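# Illustrative usage sketch: running the ray-trace scans for every entry in
# trackerdict (or a single timestamp via singleindex), sampling a module near
# the middle of the array:
#
#   trackerdict = demo.analysis1axis(trackerdict, modWanted=10, rowWanted=4,
#                                    sensorsy=9)
#   print('Bifacial ratio: %0.3f' % demo.backRatio)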
# End RadianceObj definition
class GroundObj:
"""
Class to set and return details for the ground surface materials and reflectance.
If 1 albedo value is passed, it is used as default.
If 3 albedo values are passed, they are assigned to each of the three wavelength placeholders (RGB).
If the material type is known, it is used to get reflectance info.
If the material type isn't known, material_info.list is returned.
Parameters
------------
materialOrAlbedo : numeric or str
If a number between 0 and 1 is passed, an albedo input is assumed and assigned.
If a string is passed with the name of the desired material, e.g. 'litesoil',
its properties are looked up in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
"""
def __init__(self, materialOrAlbedo=None, material_file=None):
import warnings
from numbers import Number
self.normval = None
self.ReflAvg = None
self.Rrefl = None
self.Grefl = None
self.Brefl = None
self.ground_type = 'custom'
if material_file is None:
material_file = 'ground.rad'
self.material_file = material_file
if materialOrAlbedo is None: # Case where it's none.
print('\nInput albedo 0-1, or string from ground.printGroundMaterials().'
'\nAlternatively, run setGround after readWeatherFile() '
'and setGround will read metdata.albedo if available')
return
if isinstance(materialOrAlbedo, str) :
self.ground_type = materialOrAlbedo
# Return the RGB albedo for material ground_type
materialOrAlbedo = self.printGroundMaterials(self.ground_type)
# Check for double and int.
if isinstance(materialOrAlbedo, Number):
materialOrAlbedo = np.array([[materialOrAlbedo,
materialOrAlbedo, materialOrAlbedo]])
if isinstance(materialOrAlbedo, list):
materialOrAlbedo = np.asarray(materialOrAlbedo)
# By this point, materialOrAlbedo should be a np.ndarray:
if isinstance(materialOrAlbedo, np.ndarray):
if materialOrAlbedo.ndim == 0:
# numpy array of one single value, i.e. np.array(0.62)
# after this if, np.array([0.62])
materialOrAlbedo = materialOrAlbedo.reshape([1])
if materialOrAlbedo.ndim == 1:
# If np.array is ([0.62]), this repeats it so at the end it's
# np.array ([0.62, 0.62, 0.62])
materialOrAlbedo = np.repeat(np.array([materialOrAlbedo]),
3, axis=1).reshape(
len(materialOrAlbedo),3)
if (materialOrAlbedo.ndim == 2) & (materialOrAlbedo.shape[1] > 3):
warnings.warn("Radiance only raytraces 3 wavelengths at "
"a time. Trimming albedo np.array input to "
"3 wavelengths.")
materialOrAlbedo = materialOrAlbedo[:,0:3]
# By this point we should have np.array of dim=2 and shape[1] = 3.
# Check for invalid values
if (materialOrAlbedo > 1).any() or (materialOrAlbedo < 0).any():
print('Warning: albedo values greater than 1 or less than 0. '
'Constraining to [0..1]')
materialOrAlbedo = materialOrAlbedo.clip(min=0, max=1)
try:
self.Rrefl = materialOrAlbedo[:,0]
self.Grefl = materialOrAlbedo[:,1]
self.Brefl = materialOrAlbedo[:,2]
self.normval = _normRGB(materialOrAlbedo[:,0],materialOrAlbedo[:,1],
materialOrAlbedo[:,2])
self.ReflAvg = np.round(np.mean(materialOrAlbedo, axis=1),4)
print(f'Loading albedo, {self.ReflAvg.__len__()} value(s), '
f'{self._nonzeromean(self.ReflAvg):0.3f} avg\n'
f'{self.ReflAvg[self.ReflAvg != 0].__len__()} nonzero albedo values.')
except IndexError as e:
print('albedo.shape should be 3 columns (N x 3)')
raise e
def printGroundMaterials(self, materialString=None):
"""
printGroundMaterials(materialString=None)
input: None or materialString. If None, return list of acceptable
material types from ground.rad. If valid string, return RGB albedo
of the material type selected.
"""
import warnings
material_path = 'materials'
f = open(os.path.join(material_path, self.material_file))
keys = [] #list of material key names
Rreflall = []; Greflall=[]; Breflall=[] #RGB material reflectance
temp = f.read().split()
f.close()
#return indices for 'plastic' definition
index = _findme(temp,'plastic')
for i in index:
keys.append(temp[i+1])# after plastic comes the material name
Rreflall.append(float(temp[i+5]))#RGB reflectance comes a few more down the list
Greflall.append(float(temp[i+6]))
Breflall.append(float(temp[i+7]))
if materialString is not None:
try:
index = _findme(keys,materialString)[0]
except IndexError:
warnings.warn('Error - materialString not in '
f'{self.material_file}: {materialString}')
return(np.array([[Rreflall[index], Greflall[index], Breflall[index]]]))
else:
return(keys)
def _nonzeromean(self, val):
''' array mean excluding zero. return zero if everything's zero'''
tempmean = np.nanmean(val)
if tempmean > 0:
tempmean = np.nanmean(val[val !=0])
return tempmean
def _makeGroundString(self, index=0, cumulativesky=False):
'''
create string with ground reflectance parameters for use in
gendaylit and gencumsky.
Parameters
-----------
index : integer
Index of time for time-series albedo. Default 0
cumulativesky: Boolean
If true, set albedo to average of time series values.
Returns
-------
groundstring: text with albedo details to append to sky.rad in
gendaylit
'''
try:
if cumulativesky is True:
Rrefl = self._nonzeromean(self.Rrefl)
Grefl = self._nonzeromean(self.Grefl)
Brefl = self._nonzeromean(self.Brefl)
normval = _normRGB(Rrefl, Grefl, Brefl)
else:
Rrefl = self.Rrefl[index]
Grefl = self.Grefl[index]
Brefl = self.Brefl[index]
normval = _normRGB(Rrefl, Grefl, Brefl)
# Check for all zero albedo case
if normval == 0:
normval = 1
groundstring = ( f'\nskyfunc glow ground_glow\n0\n0\n4 '
f'{Rrefl/normval} {Grefl/normval} {Brefl/normval} 0\n'
'\nground_glow source ground\n0\n0\n4 0 0 -1 180\n'
f'\nvoid plastic {self.ground_type}\n0\n0\n5 '
f'{Rrefl:0.3f} {Grefl:0.3f} {Brefl:0.3f} 0 0\n'
f"\n{self.ground_type} ring groundplane\n"
'0\n0\n8\n0 0 -.01\n0 0 1\n0 100' )
except IndexError as err:
print(f'Index {index} passed to albedo with only '
f'{self.Rrefl.__len__()} values.' )
raise err
return groundstring
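# Illustrative usage sketch: a GroundObj can be built directly from an albedo
# value or from a material name defined in ground.rad (normally this happens
# inside RadianceObj.setGround()):
#
#   ground = GroundObj(0.25)              # single broadband albedo
#   ground = GroundObj('litesoil')        # named material from ground.rad
#   print(ground.printGroundMaterials())  # list of available material names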
class SceneObj:
'''
scene information including PV module type, bifaciality, array info
pv module orientation defaults: Azimuth = 180 (south)
pv module origin: z = 0 bottom of frame. y = 0 lower edge of frame.
x = 0 vertical centerline of module
scene includes module details (x,y,bifi, sceney (collector_width), scenex)
'''
def __repr__(self):
return str(self.__dict__)
def __init__(self, module=None):
''' initialize SceneObj
'''
from bifacial_radiance import ModuleObj
# should sceneDict be initialized here? This is set in _makeSceneNxR
if module is None:
return
elif type(module) == str:
self.module = ModuleObj(name=module)
elif type(module) == ModuleObj: # try moduleObj
self.module = module
#self.moduleDict = self.module.getDataDict()
#self.scenex = self.module.scenex
#self.sceney = self.module.sceney
#self.offsetfromaxis = self.moduleDict['offsetfromaxis']
#TODO: get rid of these 4 values
self.modulefile = self.module.modulefile
self.hpc = False #default False. Set True by makeScene after sceneobj created.
def _makeSceneNxR(self, modulename=None, sceneDict=None, radname=None):
"""
Arrange module defined in :py:class:`bifacial_radiance.SceneObj` into a N x R array.
Returns a :py:class:`bifacial_radiance.SceneObj` which contains details
of the PV system configuration including `tilt`, `row pitch`, `hub_height`
or `clearance_height`, `nMods` per row, `nRows` in the system.
The returned scene has (0,0) coordinates centered at the module at the
center of the array. For 5 rows, that is row 3, for 4 rows, that is
row 2 also (rounds down). For 5 modules in the row, that is module 3,
for 4 modules in the row, that is module 2 also (rounds down)
Parameters
------------
modulename: str
Name of module created with :py:class:`~bifacial_radiance.RadianceObj.makeModule`.
sceneDict : dictionary
Dictionary of scene parameters.
clearance_height : numeric
(meters).
pitch : numeric
Separation between rows
tilt : numeric
Valid input ranges -90 to 90 degrees
azimuth : numeric
A value denoting the compass direction along which the
axis of rotation lies. Measured in decimal degrees East
of North. [0 to 180) possible.
nMods : int
Number of modules per row (default = 20)
nRows : int
Number of rows in system (default = 7)
radname : str
String for name for radfile.
Returns
-------
radfile : str
Filename of .RAD scene in /objects/
scene : :py:class:`~bifacial_radiance.SceneObj `
Returns a `SceneObject` 'scene' with configuration details
"""
if modulename is None:
modulename = self.module.name
if sceneDict is None:
print('makeScene(modulename, sceneDict, nMods, nRows). sceneDict'
' inputs: .tilt .azimuth .nMods .nRows'
' AND .pitch or .gcr ; AND .hub_height or .clearance_height')
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'axis_tilt' not in sceneDict:
sceneDict['axis_tilt'] = 0
if 'originx' not in sceneDict:
sceneDict['originx'] = 0
if 'originy' not in sceneDict:
sceneDict['originy'] = 0
if radname is None:
radname = str(self.module.name).strip().replace(' ', '_')
# loading variables
tilt = sceneDict['tilt']
azimuth = sceneDict['azimuth']
nMods = sceneDict['nMods']
nRows = sceneDict['nRows']
axis_tilt = sceneDict['axis_tilt']
originx = sceneDict ['originx']
originy = sceneDict['originy']
# hub_height, clearance_height and height logic.
# This routine uses hub_height to move the panels up, so it's important
# to have a value for it, either obtained from clearance_height
# (if coming from makeScene) or from hub_height itself.
# It is assumed that if no clearance_height or hub_height is passed,
# hub_height = height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict, preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight :
hubheight = sceneDict['clearance_height'] + 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney - self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
title_clearance_height = sceneDict['clearance_height']
else:
hubheight = sceneDict['hub_height']
# this calculates clearance_height, used for the title
title_clearance_height = sceneDict['hub_height'] - 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney + self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
try:
if sceneDict['pitch'] >0:
pitch = sceneDict['pitch']
else:
raise Exception('default to gcr')
except:
if 'gcr' in sceneDict:
pitch = np.round(self.module.sceney/sceneDict['gcr'],3)
else:
raise Exception('No valid `pitch` or `gcr` in sceneDict')
''' INITIALIZE VARIABLES '''
text = '!xform '
text += '-rx %s -t %s %s %s ' %(tilt, 0, 0, hubheight)
# create nMods-element array along x, nRows along y. 1cm module gap.
text += '-a %s -t %s 0 0 -a %s -t 0 %s 0 ' %(nMods, self.module.scenex, nRows, pitch)
# azimuth rotation of the entire shebang. Select the row to scan here based on y-translation.
# Modifying so center row is centered in the array. (i.e. 3 rows, row 2. 4 rows, row 2 too)
# Since the array is already centered on row 1, module 1, we need to increment by Nrows/2-1 and Nmods/2-1
text += (f'-i 1 -t {-self.module.scenex*(round(nMods/1.999)*1.0-1)} '
f'{-pitch*(round(nRows / 1.999)*1.0-1)} 0 -rz {180-azimuth} '
f'-t {originx} {originy} 0 ' )
#axis tilt only working for N-S trackers
if axis_tilt != 0 and azimuth == 90:
print("Axis_Tilt is still under development. The scene will be "
"created with the proper axis tilt, and the tracking angle"
"will consider the axis_tilt, but the sensors for the "
"analysis might not fall in the correct surfaces unless you"
" manually position them for this version. Sorry! :D ")
text += (f'-rx {axis_tilt} -t 0 0 %s ' %(
self.module.scenex*(round(nMods/1.99)*1.0-1)*np.sin(
axis_tilt * np.pi/180) ) )
filename = (f'{radname}_C_{title_clearance_height:0.5f}_rtr_{pitch:0.5f}_tilt_{tilt:0.5f}_'
f'{nMods}modsx{nRows}rows_origin{originx},{originy}.rad' )
if self.hpc:
text += f'"{os.path.join(os.getcwd(), self.modulefile)}"'
radfile = os.path.join(os.getcwd(), 'objects', filename)
else:
text += os.path.join(self.modulefile)
radfile = os.path.join('objects',filename )
# py2 and 3 compatible: binary write, encode text first
with open(radfile, 'wb') as f:
f.write(text.encode('ascii'))
self.gcr = self.module.sceney / pitch
self.text = text
self.radfiles = radfile
self.sceneDict = sceneDict
# self.hub_height = hubheight
return radfile
def showScene(self):
"""
Method to call objview on the scene included in self
"""
cmd = 'objview %s %s' % (os.path.join('materials', 'ground.rad'),
self.radfiles)
print('Rendering scene. This may take a moment...')
_,err = _popen(cmd,None)
if err is not None:
print('Error: {}'.format(err))
print('possible solution: install radwinexe binary package from '
'http://www.jaloxa.eu/resources/radiance/radwinexe.shtml'
' into your RADIANCE binaries path')
return
# end of SceneObj
class MetObj:
"""
Meteorological data from EPW file.
Initialize the MetObj from tmy data already read in.
Parameters
-----------
tmydata : DataFrame
TMY3 output from :py:class:`~bifacial_radiance.RadianceObj.readTMY` or
from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
metadata : Dictionary
Metadata output from :py:class:`~bifacial_radiance.RadianceObj.readTMY`
or from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if the
timestamp refers to the left edge, the right edge, or the center of the
averaging interval, for purposes of calculating sunposition. For
example, TMY3 data is right-labeled, so 11 AM data represents data from
10 to 11, and sun position should be calculated at 10:30 AM. Currently
SAM and PVSyst use left-labeled interval data and NSRDB uses centered.
"""
def __init__(self, tmydata, metadata, label = 'right'):
import pytz
import pvlib
#import numpy as np
#First prune all GHI = 0 timepoints. New as of 0.4.0
# TODO: is this a good idea? This changes default behavior...
tmydata = tmydata[tmydata.GHI > 0]
# location data. so far needed:
# latitude, longitude, elevation, timezone, city
self.latitude = metadata['latitude']; lat=self.latitude
self.longitude = metadata['longitude']; lon=self.longitude
self.elevation = metadata['altitude']; elev=self.elevation
self.timezone = metadata['TZ']
try:
self.city = metadata['Name'] # readepw version
except KeyError:
self.city = metadata['city'] # pvlib version
#self.location.state_province_region = metadata['State'] # unnecessary
self.datetime = tmydata.index.tolist() # this is tz-aware.
self.ghi = np.array(tmydata.GHI)
self.dhi = np.array(tmydata.DHI)
self.dni = np.array(tmydata.DNI)
try:
self.albedo = np.array(tmydata.Alb)
except AttributeError: # no TMY albedo data
self.albedo = None
# Try and retrieve dewpoint and pressure
try:
self.dewpoint = np.array(tmydata['temp_dew'])
except KeyError:
self.dewpoint = None
try:
self.pressure = np.array(tmydata['atmospheric_pressure'])
except KeyError:
self.pressure = None
try:
self.temp_air = np.array(tmydata['temp_air'])
except KeyError:
self.temp_air = None
try:
self.wind_speed = np.array(tmydata['wind_speed'])
except KeyError:
self.wind_speed = None
# Try and retrieve TrackerAngle
try:
self.meastracker_angle = np.array(tmydata['Tracker Angle (degrees)'])
except KeyError:
self.meastracker_angle= None
#v0.2.5: initialize MetObj with solpos, sunrise/set and corrected time
datetimetz = pd.DatetimeIndex(self.datetime)
try: # make sure the data is tz-localized.
datetimetz = datetimetz.tz_localize(pytz.FixedOffset(self.timezone*60))# use pytz.FixedOffset (in minutes)
except TypeError: # data is tz-localized already. Just put it in local time.
datetimetz = datetimetz.tz_convert(pytz.FixedOffset(self.timezone*60))
#check for data interval. default 1h.
try:
interval = datetimetz[1]-datetimetz[0]
except IndexError:
interval = pd.Timedelta('1h') # ISSUE: if 1 datapoint is passed, are we sure it's hourly data?
print ("WARNING: TMY interval was unable to be defined, so setting it to 1h.")
# TODO: Refactor this into a subfunction. first calculate minutedelta
# based on label and interval (-30, 0, +30, +7.5 etc) then correct all.
if label.lower() == 'center':
print("Calculating Sun position for center labeled data, at exact timestamp in input Weather File")
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon) #new for pvlib >= 0.6.1
sunup['corrected_timestamp'] = datetimetz
else:
if interval== pd.Timedelta('1h'):
if label.lower() == 'right':
print("Calculating Sun position for Metdata that is right-labeled ",
"with a delta of -30 mins. i.e. 12 is 11:30 sunpos")
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon) #new for pvlib >= 0.6.1
sunup['minutedelta']= int(interval.seconds/2/60) # default sun angle 30 minutes before timestamp
# vector update of minutedelta at sunrise
sunrisemask = sunup.index.hour-1==sunup['sunrise'].dt.hour
sunup['minutedelta'].mask(sunrisemask,np.floor((60-(sunup['sunrise'].dt.minute))/2),inplace=True)
# vector update of minutedelta at sunset
sunsetmask = sunup.index.hour-1==sunup['sunset'].dt.hour
sunup['minutedelta'].mask(sunsetmask,np.floor((60-(sunup['sunset'].dt.minute))/2),inplace=True)
# save corrected timestamp
sunup['corrected_timestamp'] = sunup.index-pd.to_timedelta(sunup['minutedelta'], unit='m')
elif label.lower() == 'left':
print("Calculating Sun position for Metdata that is left-labeled ",
"with a delta of +30 mins. i.e. 12 is 12:30 sunpos.")
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon)
sunup['minutedelta']= int(interval.seconds/2/60) # default sun angle 30 minutes after timestamp
# vector update of minutedelta at sunrise
sunrisemask = sunup.index.hour==sunup['sunrise'].dt.hour
sunup['minutedelta'].mask(sunrisemask,np.ceil((60+sunup['sunrise'].dt.minute)/2),inplace=True)
# vector update of minutedelta at sunset
sunsetmask = sunup.index.hour==sunup['sunset'].dt.hour
sunup['minutedelta'].mask(sunsetmask,np.ceil((60+sunup['sunset'].dt.minute)/2),inplace=True)
# save corrected timestamp
sunup['corrected_timestamp'] = sunup.index+pd.to_timedelta(sunup['minutedelta'], unit='m')
else: raise ValueError('Error: invalid weather label passed. Valid inputs: right, left or center')
else:
minutedelta = int(interval.seconds/2/60)
print("Interval in weather data is less than 1 hr, calculating"
f" Sun position with a delta of -{minutedelta} minutes.")
print("If you want no delta for sunposition, use "
"readWeatherFile( label='center').")
#datetimetz=datetimetz-pd.Timedelta(minutes = minutedelta) # This doesn't check for Sunrise or Sunset
#sunup= pvlib.irradiance.solarposition.get_sun_rise_set_transit(datetimetz, lat, lon) # deprecated in pvlib 0.6.1
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon) #new for pvlib >= 0.6.1
sunup['corrected_timestamp'] = sunup.index-pd.Timedelta(minutes = minutedelta)
self.solpos = pvlib.irradiance.solarposition.get_solarposition(sunup['corrected_timestamp'],lat,lon,elev)
self.sunrisesetdata=sunup
self.label = label
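        # Illustrative note (not part of the original code; values hypothetical):
        # for hourly, right-labeled data the 12:00 timestamp covers 11:00-12:00,
        # so the sun position is evaluated at 11:30 (minutedelta = 30). Near
        # sunrise the delta is shortened, e.g. sunrise at 07:40 means the 08:00
        # stamp is corrected to 07:50 (floor((60-40)/2) = 10 minutes).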
def _set1axis(self, azimuth=180, limit_angle=45, angledelta=None,
backtrack=True, gcr=1.0/3.0, cumulativesky=True,
fixed_tilt_angle=None, axis_tilt=0, useMeasuredTrackerAngle=False):
"""
Set up geometry for 1-axis tracking cumulativesky. Solpos data
already stored in `metdata.solpos`. Pull in tracking angle details from
pvlib, create multiple 8760 metdata sub-files where datetime of met
data matches the tracking angle.
Parameters
------------
cumulativesky : bool
Whether individual csv files are created
with constant tilt angle for the cumulativesky approach.
            If False, the gendaylit tracking approach must be used.
azimuth : numerical
orientation axis of tracker torque tube. Default North-South (180 deg)
For fixed tilt simulations this is the orientation azimuth
limit_angle : numerical
+/- limit angle of the 1-axis tracker in degrees. Default 45
angledelta : numerical
Degree of rotation increment to parse irradiance bins.
Default 5 degrees (0.4 % error for DNI).
Other options: 4 (.25%), 2.5 (0.1%).
(the smaller the angledelta, the more simulations)
backtrack : bool
Whether backtracking is enabled (default = True)
gcr : float
            Ground coverage ratio used for the backtracking calculation. Default [1.0/3.0]
axis_tilt : float
            Tilt of the axis. While it can be considered for the tracking
            calculation, the scene geometry creation of the trackers does not
            support tilted-axis trackers yet (but this can be done manually;
            see Tutorials).
fixed_tilt_angle : numeric
If passed, this changes to a fixed tilt simulation where each hour
uses fixed_tilt_angle and azimuth as the tilt and azimuth
Returns
-------
trackerdict : dictionary
Keys for tracker tilt angles and
list of csv metfile, and datetimes at that angle
trackerdict[angle]['csvfile';'surf_azm';'surf_tilt';'UTCtime']
metdata.solpos : dataframe
Dataframe with output from pvlib solar position for each timestep
metdata.sunrisesetdata :
Pandas dataframe with sunrise, sunset and adjusted time data.
metdata.tracker_theta : list
Tracker tilt angle from pvlib for each timestep
metdata.surface_tilt : list
Tracker surface tilt angle from pvlib for each timestep
metdata.surface_azimuth : list
Tracker surface azimuth angle from pvlib for each timestep
"""
#axis_tilt = 0 # only support 0 tilt trackers for now
self.cumulativesky = cumulativesky # track whether we're using cumulativesky or gendaylit
if (cumulativesky is True) & (angledelta is None):
angledelta = 5 # round angle to 5 degrees for cumulativesky
# get 1-axis tracker angles for this location,
# round to nearest 'angledelta'
if self.meastracker_angle is not None and useMeasuredTrackerAngle is True:
print("Tracking Data: Reading from provided Tracker Angles")
elif self.meastracker_angle is None and useMeasuredTrackerAngle is True:
useMeasuredTrackerAngle = False
print("Warning: Using Measured Tracker Angles was specified but DATA"+
" for trackers has not yet been assigned. "+
" Assign it by making it a column on your Weatherdata File "+
"named 'Tracker Angle (degrees)' and run ReadWeatherFile again")
trackingdata = self._getTrackingAngles(azimuth,
limit_angle,
angledelta,
axis_tilt = axis_tilt,
backtrack = backtrack,
gcr = gcr,
fixed_tilt_angle=fixed_tilt_angle,
useMeasuredTrackerAngle=useMeasuredTrackerAngle)
# get list of unique rounded tracker angles
theta_list = trackingdata.dropna()['theta_round'].unique()
if cumulativesky is True:
# create a separate metfile for each unique tracker theta angle.
# return dict of filenames and details
trackerdict = self._makeTrackerCSV(theta_list,trackingdata)
else:
# trackerdict uses timestamp as keys. return azimuth
# and tilt for each timestamp
#times = [str(i)[5:-12].replace('-','_').replace(' ','_') for i in self.datetime]
times = [i.strftime('%Y-%m-%d_%H%M') for i in self.datetime]
#trackerdict = dict.fromkeys(times)
trackerdict = {}
for i,time in enumerate(times) :
# remove NaN tracker theta from trackerdict
if (self.ghi[i] > 0) & (~np.isnan(self.tracker_theta[i])):
trackerdict[time] = {
'surf_azm':self.surface_azimuth[i],
'surf_tilt':self.surface_tilt[i],
'theta':self.tracker_theta[i],
'ghi':self.ghi[i],
'dhi':self.dhi[i]
}
return trackerdict
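        # Hedged usage sketch (keys are those set above; sample values are
        # hypothetical). With cumulativesky=False each entry is keyed by
        # timestamp, e.g.:
        #   trackerdict['2021-06-21_1200'] = {'surf_azm': 90.0, 'surf_tilt': 20.0,
        #                                     'theta': -20.0, 'ghi': 850, 'dhi': 120}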
def _getTrackingAngles(self, azimuth=180, limit_angle=45,
angledelta=None, axis_tilt=0, backtrack=True,
gcr = 1.0/3.0, fixed_tilt_angle=None,
useMeasuredTrackerAngle=False):
'''
Helper subroutine to return 1-axis tracker tilt and azimuth data.
Parameters
----------
same as pvlib.tracking.singleaxis, plus:
angledelta : degrees
Angle to round tracker_theta to. This is for
cumulativesky simulations. Other input options: None (no
rounding of tracker angle)
fixed_tilt_angle : (Optional) degrees
This changes to a fixed tilt simulation where each hour uses
fixed_tilt_angle and azimuth as the tilt and azimuth
Returns
-------
DataFrame with the following columns:
* tracker_theta: The rotation angle of the tracker.
tracker_theta = 0 is horizontal, and positive rotation angles
are clockwise.
* aoi: The angle-of-incidence of direct irradiance onto the
rotated panel surface.
* surface_tilt: The angle between the panel surface and the earth
surface, accounting for panel rotation.
* surface_azimuth: The azimuth of the rotated panel, determined by
projecting the vector normal to the panel's surface to the
earth's surface.
* 'theta_round' : tracker_theta rounded to the nearest 'angledelta'
If no angledelta is specified, it is rounded to the nearest degree.
'''
import pvlib
import warnings
from pvlib.irradiance import aoi
#import numpy as np
#import pandas as pd
solpos = self.solpos
#New as of 0.3.2: pass fixed_tilt_angle and switches to FIXED TILT mode
if fixed_tilt_angle is not None:
# system with fixed tilt = fixed_tilt_angle
surface_tilt=fixed_tilt_angle
surface_azimuth=azimuth
# trackingdata keys: 'tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'
trackingdata = pd.DataFrame({'tracker_theta':fixed_tilt_angle,
'aoi':aoi(surface_tilt, surface_azimuth,
solpos['zenith'],
solpos['azimuth']),
'surface_azimuth':azimuth,
'surface_tilt':fixed_tilt_angle})
elif useMeasuredTrackerAngle:
# tracked system
surface_tilt=self.meastracker_angle
surface_azimuth=azimuth
trackingdata = pd.DataFrame({'tracker_theta':self.meastracker_angle,
'aoi':aoi(surface_tilt, surface_azimuth,
solpos['zenith'],
solpos['azimuth']),
'surface_azimuth':azimuth,
'surface_tilt':abs(self.meastracker_angle)})
else:
# get 1-axis tracker tracker_theta, surface_tilt and surface_azimuth
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
trackingdata = pvlib.tracking.singleaxis(solpos['zenith'],
solpos['azimuth'],
axis_tilt,
azimuth,
limit_angle,
backtrack,
gcr)
# save tracker tilt information to metdata.tracker_theta,
# metdata.surface_tilt and metdata.surface_azimuth
self.tracker_theta = np.round(trackingdata['tracker_theta'],2).tolist()
self.surface_tilt = np.round(trackingdata['surface_tilt'],2).tolist()
self.surface_azimuth = np.round(trackingdata['surface_azimuth'],2).tolist()
# undo the timestamp offset put in by solpos.
#trackingdata.index = trackingdata.index + pd.Timedelta(minutes = 30)
# It may not be exactly 30 minutes any more...
trackingdata.index = self.sunrisesetdata.index #this has the original time data in it
# round tracker_theta to increments of angledelta for use in cumulativesky
def _roundArbitrary(x, base=angledelta):
# round to nearest 'base' value.
# mask NaN's to avoid rounding error message
return base * (x/float(base)).round()
if angledelta == 0:
raise ZeroDivisionError('Angledelta = 0. Use None instead')
elif angledelta is None: # don't round theta
trackingdata['theta_round'] = trackingdata['tracker_theta']
else: # round theta
trackingdata['theta_round'] = \
_roundArbitrary(trackingdata['tracker_theta'], angledelta)
return trackingdata
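        # Quick check of the rounding helper above (illustrative only): with
        # angledelta=5, tracker_theta values of -17.4, 12.6 and 2.0 map to
        # theta_round values of -15.0, 15.0 and 0.0 respectively
        # (i.e. base * round(x / base)).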
def _makeTrackerCSV(self, theta_list, trackingdata):
'''
Create multiple new irradiance csv files with data for each unique
rounded tracker angle. Return a dictionary with the new csv filenames
        and other details. Used for cumulativesky tracking.
Parameters
-----------
theta_list : array
Array of unique tracker angle values
        trackingdata : Pandas DataFrame
            DataFrame with hourly tracker angles (including 'theta_round') from
            :pvlib.tracking.singleaxis
Returns
--------
trackerdict : dictionary
keys: *theta_round tracker angle (default: -45 to +45 in
5 degree increments).
sub-array keys:
*datetime: array of datetime strings in this group of angles
*count: number of datapoints in this group of angles
*surf_azm: tracker surface azimuth during this group of angles
*surf_tilt: tilt angle average during this group of angles
*csvfile: name of csv met data file saved in /EPWs/
'''
dt = pd.to_datetime(self.datetime)
trackerdict = dict.fromkeys(theta_list)
for theta in sorted(trackerdict):
trackerdict[theta] = {}
csvfile = os.path.join('EPWs', '1axis_{}.csv'.format(theta))
tempdata = trackingdata[trackingdata['theta_round'] == theta]
#Set up trackerdict output for each value of theta
trackerdict[theta]['csvfile'] = csvfile
trackerdict[theta]['surf_azm'] = tempdata['surface_azimuth'].median()
trackerdict[theta]['surf_tilt'] = abs(theta)
datetimetemp = tempdata.index.strftime('%Y-%m-%d %H:%M:%S') #local time
trackerdict[theta]['datetime'] = datetimetemp
trackerdict[theta]['count'] = datetimetemp.__len__()
#Create new temp csv file with zero values for all times not equal to datetimetemp
# write 8760 2-column csv: GHI,DHI
ghi_temp = []
dhi_temp = []
for g, d, time in zip(self.ghi, self.dhi,
dt.strftime('%Y-%m-%d %H:%M:%S')):
# is this time included in a particular theta_round angle?
if time in datetimetemp:
ghi_temp.append(g)
dhi_temp.append(d)
else:
# mask out irradiance at this time, since it
# belongs to a different bin
ghi_temp.append(0.0)
dhi_temp.append(0.0)
# save in 2-column GHI,DHI format for gencumulativesky -G
savedata = pd.DataFrame({'GHI':ghi_temp, 'DHI':dhi_temp},
index = self.datetime).tz_localize(None)
# Fill partial year. Requires 2021 measurement year.
savedata = _subhourlydatatoGencumskyformat(savedata,
label=self.label)
print('Saving file {}, # points: {}'.format(
trackerdict[theta]['csvfile'], datetimetemp.__len__()))
savedata.to_csv(csvfile,
index=False,
header=False,
sep=' ',
columns=['GHI','DHI'])
return trackerdict
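        # Illustrative note (not library output): each per-angle csv written
        # above is a headerless, space-separated two-column GHI DHI file, with
        # zeros for every hour belonging to a different tracker-angle bin, e.g.
        #   0.0 0.0
        #   512.3 101.7
        #   0.0 0.0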
class AnalysisObj:
"""
Analysis class for performing raytrace to obtain irradiance measurements
    at the array, as well as plotting and reporting results.
"""
def __repr__(self):
return str(self.__dict__)
def __init__(self, octfile=None, name=None, hpc=False):
"""
Initialize AnalysisObj by pointing to the octfile. Scan information
is defined separately by passing scene details into AnalysisObj.moduleAnalysis()
Parameters
------------
octfile : string
Filename and extension of .oct file
        name : string
            Identifier used when naming the saved images/results.
        hpc : boolean, default False
            Waits for the octfile for a longer time when parallel processing.
"""
self.octfile = octfile
self.name = name
self.hpc = hpc
def makeImage(self, viewfile, octfile=None, name=None):
"""
Makes a visible image (rendering) of octfile, viewfile
"""
import time
if octfile is None:
octfile = self.octfile
if name is None:
name = self.name
#TODO: update this for cross-platform compatibility w/ os.path.join
if self.hpc :
time_to_wait = 10
time_counter = 0
filelist = [octfile, "views/"+viewfile]
for file in filelist:
while not os.path.exists(file):
time.sleep(1)
time_counter += 1
if time_counter > time_to_wait:break
print('Generating visible render of scene')
#TODO: update this for cross-platform compatibility w os.path.join
os.system("rpict -dp 256 -ar 48 -ms 1 -ds .2 -dj .9 -dt .1 "+
"-dc .5 -dr 1 -ss 1 -st .1 -ab 3 -aa .1 "+
"-ad 1536 -as 392 -av 25 25 25 -lr 8 -lw 1e-4 -vf views/"
+viewfile+ " " + octfile +
" > images/"+name+viewfile[:-3] +".hdr")
def makeFalseColor(self, viewfile, octfile=None, name=None):
"""
Makes a false-color plot of octfile, viewfile
.. note::
For Windows requires installation of falsecolor.exe,
which is part of radwinexe-5.0.a.8-win64.zip found at
http://www.jaloxa.eu/resources/radiance/radwinexe.shtml
"""
#TODO: error checking for installation of falsecolor.exe
if octfile is None:
octfile = self.octfile
if name is None:
name = self.name
print('Generating scene in WM-2. This may take some time.')
#TODO: update and test this for cross-platform compatibility using os.path.join
cmd = "rpict -i -dp 256 -ar 48 -ms 1 -ds .2 -dj .9 -dt .1 "+\
"-dc .5 -dr 1 -ss 1 -st .1 -ab 3 -aa .1 -ad 1536 -as 392 " +\
"-av 25 25 25 -lr 8 -lw 1e-4 -vf views/"+viewfile + " " + octfile
WM2_out,err = _popen(cmd,None)
if err is not None:
print('Error: {}'.format(err))
return
# determine the extreme maximum value to help with falsecolor autoscale
extrm_out,err = _popen("pextrem",WM2_out.encode('latin1'))
# cast the pextrem string as a float and find the max value
WM2max = max(map(float,extrm_out.split()))
print('Saving scene in false color')
#auto scale false color map
if WM2max < 1100:
cmd = "falsecolor -l W/m2 -m 1 -s 1100 -n 11"
else:
cmd = "falsecolor -l W/m2 -m 1 -s %s"%(WM2max,)
with open(os.path.join("images","%s%s_FC.hdr"%(name,viewfile[:-3]) ),"w") as f:
data,err = _popen(cmd,WM2_out.encode('latin1'),f)
if err is not None:
print(err)
print('possible solution: install radwinexe binary package from '
'http://www.jaloxa.eu/resources/radiance/radwinexe.shtml')
def _linePtsArray(self, linePtsDict):
"""
        Helper function to return the x, y and z values in an array format,
        just as they will appear in the .csv result files.
"""
xstart = linePtsDict['xstart']
ystart = linePtsDict['ystart']
zstart = linePtsDict['zstart']
xinc = linePtsDict['xinc']
yinc = linePtsDict['yinc']
zinc = linePtsDict['zinc']
sx_xinc = linePtsDict['sx_xinc']
sx_yinc = linePtsDict['sx_yinc']
sx_zinc = linePtsDict['sx_zinc']
Nx = int(linePtsDict['Nx'])
Ny = int(linePtsDict['Ny'])
Nz = int(linePtsDict['Nz'])
x = []
y = []
z = []
for iz in range(0,Nz):
for ix in range(0,Nx):
for iy in range(0,Ny):
                    x.append(xstart+iy*xinc+ix*sx_xinc)
                    y.append(ystart+iy*yinc+ix*sx_yinc)
                    z.append(zstart+iy*zinc+ix*sx_zinc)
return x, y, z
def _linePtsMakeDict(self, linePtsDict):
a = linePtsDict
linepts = self._linePtsMake3D(a['xstart'],a['ystart'],a['zstart'],
a['xinc'], a['yinc'], a['zinc'],
a['sx_xinc'], a['sx_yinc'], a['sx_zinc'],
a['Nx'],a['Ny'],a['Nz'],a['orient'])
return linepts
def _linePtsMake3D(self, xstart, ystart, zstart, xinc, yinc, zinc,
sx_xinc, sx_yinc, sx_zinc,
Nx, Ny, Nz, orient):
#create linepts text input with variable x,y,z.
#If you don't want to iterate over a variable, inc = 0, N = 1.
linepts = ""
# make sure Nx, Ny, Nz are ints.
Nx = int(Nx)
Ny = int(Ny)
Nz = int(Nz)
for iz in range(0,Nz):
for ix in range(0,Nx):
for iy in range(0,Ny):
xpos = xstart+iy*xinc+ix*sx_xinc
ypos = ystart+iy*yinc+ix*sx_yinc
zpos = zstart+iy*zinc+ix*sx_zinc
linepts = linepts + str(xpos) + ' ' + str(ypos) + \
' '+str(zpos) + ' ' + orient + " \r"
return(linepts)
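        # Sketch of the string produced above (numbers hypothetical): one
        # "x y z xdir ydir zdir" record per scan point, separated by "\r", e.g.
        #   "0.0 0.0 1.5 0.000 0.000 -1.000 \r0.0 0.2 1.5 0.000 0.000 -1.000 \r..."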
def _irrPlot(self, octfile, linepts, mytitle=None, plotflag=None,
accuracy='low'):
"""
(plotdict) = _irrPlot(linepts,title,time,plotflag, accuracy)
irradiance plotting using rtrace
pass in the linepts structure of the view along with a title string
for the plots.
Parameters
------------
octfile : string
Filename and extension of .oct file
linepts :
Output from :py:class:`bifacial_radiance.AnalysisObj._linePtsMake3D`
mytitle : string
Title to append to results files
plotflag : Boolean
Include plot of resulting irradiance
accuracy : string
Either 'low' (default - faster) or 'high'
(better for low light)
Returns
-------
out : dictionary
out.x,y,z - coordinates of point
.r,g,b - r,g,b values in Wm-2
.Wm2 - equal-weight irradiance
.mattype - material intersected
.title - title passed in
"""
if mytitle is None:
mytitle = octfile[:-4]
if plotflag is None:
plotflag = False
if self.hpc :
import time
time_to_wait = 10
time_counter = 0
while not os.path.exists(octfile):
time.sleep(1)
time_counter += 1
if time_counter > time_to_wait:
print('Warning: OCTFILE NOT FOUND')
break
if octfile is None:
print('Analysis aborted. octfile = None' )
return None
        keys = ['Wm2', 'x', 'y', 'z', 'r', 'g', 'b', 'mattype']
out = {key: [] for key in keys}
#out = dict.fromkeys(['Wm2','x','y','z','r','g','b','mattype','title'])
out['title'] = mytitle
print ('Linescan in process: %s' %(mytitle))
#rtrace ambient values set for 'very accurate':
#cmd = "rtrace -i -ab 5 -aa .08 -ar 512 -ad 2048 -as 512 -h -oovs "+ octfile
if accuracy == 'low':
#rtrace optimized for faster scans: (ab2, others 96 is too coarse)
cmd = "rtrace -i -ab 2 -aa .1 -ar 256 -ad 2048 -as 256 -h -oovs "+ octfile
elif accuracy == 'high':
#rtrace ambient values set for 'very accurate':
cmd = "rtrace -i -ab 5 -aa .08 -ar 512 -ad 2048 -as 512 -h -oovs "+ octfile
else:
print('_irrPlot accuracy options: "low" or "high"')
return({})
temp_out,err = _popen(cmd,linepts.encode())
if err is not None:
if err[0:5] == 'error':
raise Exception(err[7:])
else:
print(err)
# when file errors occur, temp_out is None, and err message is printed.
if temp_out is not None:
for line in temp_out.splitlines():
temp = line.split('\t')
out['x'].append(float(temp[0]))
out['y'].append(float(temp[1]))
out['z'].append(float(temp[2]))
out['r'].append(float(temp[3]))
out['g'].append(float(temp[4]))
out['b'].append(float(temp[5]))
out['mattype'].append(temp[6])
out['Wm2'].append(sum([float(i) for i in temp[3:6]])/3.0)
if plotflag is True:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(out['Wm2'])
plt.ylabel('Wm2 irradiance')
plt.xlabel('variable')
plt.title(mytitle)
plt.show()
else:
out = None # return empty if error message.
return(out)
def _saveResults(self, data=None, reardata=None, savefile=None, RGB = False):
"""
        Function to save output from _irrPlot.
        If reardata is passed in, the back ratio is also saved.
        If data is None, only reardata is saved.
Returns
--------
savefile : str
If set to None, will write to default .csv filename in results folder.
"""
if savefile is None:
savefile = data['title'] + '.csv'
if data is None and reardata is not None: # only rear data is passed.
data = reardata
reardata = None
# run process like normal but swap labels at the end
rearswapflag = True
else:
rearswapflag = False
# make savefile dataframe and set self.attributes
if RGB:
data_sub = {key:data[key] for key in ['x', 'y', 'z', 'mattype', 'Wm2','r', 'g', 'b' ]}
else:
data_sub = {key:data[key] for key in ['x', 'y', 'z', 'mattype','Wm2' ]}
df = pd.DataFrame(data_sub)
df = df.rename(columns={'Wm2':'Wm2Front'})
if reardata is not None:
df.insert(3, 'rearZ', reardata['z'])
df.insert(5, 'rearMat', reardata['mattype'])
df.insert(7, 'Wm2Back', reardata['Wm2'])
# add 1mW/m2 to avoid dividebyzero
df.insert(8, 'Back/FrontRatio', df['Wm2Back'] / (df['Wm2Front']+.001))
df['backRatio'] = df['Back/FrontRatio']
df['rearX'] = reardata['x']
df['rearY'] = reardata['y']
if RGB:
df['rearR'] = reardata['r']
df['rearG'] = reardata['g']
df['rearB'] = reardata['b']
#df = df[['x','y','z','rearZ','mattype','rearMat',
# 'Wm2Front','Wm2Back','Back/FrontRatio',
# 'r','g','b', 'rearR','rearG','rearB']]
#else:
#df = df[['x','y','z','rearZ','mattype','rearMat',
# 'Wm2Front','Wm2Back','Back/FrontRatio']]
#else:
# if RGB:
# df = df[['x','y','z', 'mattype','Wm2Front', 'r', 'g', 'b']]
#
# else:
# df = df[['x','y','z', 'mattype','Wm2Front']]
# rename columns if only rear data was originally passed
if rearswapflag:
df = df.rename(columns={'Wm2Front':'Wm2Back','mattype':'rearMat'})
# set attributes of analysis to equal columns of df
for col in df.columns:
setattr(self, col, list(df[col]))
# only save a subset
df = df.drop(columns=['rearX','rearY','backRatio'], errors='ignore')
df.to_csv(os.path.join("results", savefile), sep = ',',
index = False)
print('Saved: %s'%(os.path.join("results", savefile)))
return os.path.join("results", savefile)
def _saveResultsCumulative(self, data, reardata=None, savefile=None):
"""
TEMPORARY FUNCTION -- this is a fix to save ONE cumulative results csv
in the main working folder for when doing multiple entries in a
tracker dict.
Returns
--------
savefile : str
If set to None, will write to default .csv filename in results folder.
"""
if savefile is None:
savefile = data['title'] + '.csv'
# make dataframe from results
data_sub = {key:data[key] for key in ['x', 'y', 'z', 'Wm2', 'mattype']}
self.x = data['x']
self.y = data['y']
self.z = data['z']
self.mattype = data['mattype']
#TODO: data_sub front values don't seem to be saved to self.
if reardata is not None:
self.rearX = reardata['x']
self.rearY = reardata['y']
self.rearMat = reardata['mattype']
data_sub['rearMat'] = self.rearMat
self.rearZ = reardata['z']
data_sub['rearZ'] = self.rearZ
self.Wm2Front = data_sub.pop('Wm2')
data_sub['Wm2Front'] = self.Wm2Front
self.Wm2Back = reardata['Wm2']
data_sub['Wm2Back'] = self.Wm2Back
self.backRatio = [x/(y+.001) for x,y in zip(reardata['Wm2'],data['Wm2'])] # add 1mW/m2 to avoid dividebyzero
data_sub['Back/FrontRatio'] = self.backRatio
df = pd.DataFrame.from_dict(data_sub)
df.to_csv(savefile, sep = ',',
columns = ['x','y','z','rearZ','mattype','rearMat',
'Wm2Front','Wm2Back','Back/FrontRatio'],
index = False) # new in 0.2.3
else:
df = pd.DataFrame.from_dict(data_sub)
df.to_csv(savefile, sep = ',',
columns = ['x','y','z', 'mattype','Wm2'], index = False)
print('Saved: %s'%(savefile))
return (savefile)
def moduleAnalysis(self, scene, modWanted=None, rowWanted=None,
sensorsy=9, sensorsx=1,
frontsurfaceoffset=0.001, backsurfaceoffset=0.001,
modscanfront=None, modscanback=None, relative=False,
debug=False):
"""
        Handler function that decides how to handle different numbers of front
        and back sensors. If the number of front sensors is not provided, or is
        the same as for the back, _moduleAnalysis is called only once.
        Otherwise it is called twice to get separate front and back
        dictionaries.
This function defines the scan points to be used in the
:py:class:`~bifacial_radiance.AnalysisObj.analysis` function,
to perform the raytrace through Radiance function `rtrace`
Parameters
------------
scene : ``SceneObj``
Generated with :py:class:`~bifacial_radiance.RadianceObj.makeScene`.
modWanted : int
Module wanted to sample. If none, defaults to center module (rounding down)
rowWanted : int
Row wanted to sample. If none, defaults to center row (rounding down)
sensorsy : int or list
Number of 'sensors' or scanning points along the collector width
(CW) of the module(s). If multiple values are passed, first value
represents number of front sensors, second value is number of back sensors
sensorsx : int or list
Number of 'sensors' or scanning points along the length, the side perpendicular
to the collector width (CW) of the module(s) for the back side of the module.
If multiple values are passed, first value represents number of
front sensors, second value is number of back sensors.
debug : bool
            Activates various print statements for debugging this function.
modscanfront : dict
            Dictionary to modify the frontscan values established by this routine
and set a specific value. Keys possible are 'xstart', 'ystart', 'zstart',
'xinc', 'yinc', 'zinc', 'Nx', 'Ny', 'Nz', and 'orient'. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated frontscan dictionary for the module & row selected.
modscanback: dict
Dictionary to modify the backscan values established by this routine
and set a specific value. Keys possible are 'xstart', 'ystart', 'zstart',
'xinc', 'yinc', 'zinc', 'Nx', 'Ny', 'Nz', and 'orient'. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
            for example '0 0 -1'. These values will overwrite the internally
            calculated backscan dictionary for the module & row selected.
relative : Bool
            If passing modscanfront and modscanback to modify the dictionaries
            of positions, this sets whether the values passed are relative or
            absolute. Default is absolute (relative=False).
Returns
-------
frontscan : dictionary
Scan dictionary for module's front side. Used to pass into
:py:class:`~bifacial_radiance.AnalysisObj.analysis` function
backscan : dictionary
Scan dictionary for module's back side. Used to pass into
:py:class:`~bifacial_radiance.AnalysisObj.analysis` function
"""
# Height: clearance height for fixed tilt systems, or torque tube
# height for single-axis tracked systems.
# Single axis tracked systems will consider the offset to calculate the final height.
def _checkSensors(sensors):
# Checking Sensors input data for list or tuple
if (type(sensors)==tuple or type(sensors)==list):
try:
sensors_back = sensors[1]
sensors_front = sensors[0]
except IndexError: # only 1 value passed??
sensors_back = sensors_front = sensors[0]
elif (type(sensors)==int or type(sensors)==float):
# Ensure sensors are positive int values.
if int(sensors) < 1:
raise Exception('input sensorsy must be numeric >0')
sensors_back = sensors_front = int(sensors)
else:
print('Warning: invalid value passed for sensors. Setting = 1')
sensors_back = sensors_front = 1
return sensors_front, sensors_back
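        # Behavior of the helper above (illustrative):
        #   _checkSensors([9, 5]) -> (9, 5)   # front, back
        #   _checkSensors(9)      -> (9, 9)
        #   _checkSensors('bad')  -> (1, 1) with a warning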
sensorsy_front, sensorsy_back = _checkSensors(sensorsy)
sensorsx_front, sensorsx_back = _checkSensors(sensorsx)
if (sensorsx_back != sensorsx_front) or (sensorsy_back != sensorsy_front):
sensors_diff = True
else:
sensors_diff = False
dtor = np.pi/180.0
# Internal scene parameters are stored in scene.sceneDict. Load these into local variables
sceneDict = scene.sceneDict
azimuth = sceneDict['azimuth']
tilt = sceneDict['tilt']
nMods = sceneDict['nMods']
nRows = sceneDict['nRows']
originx = sceneDict['originx']
originy = sceneDict['originy']
# offset = moduleDict['offsetfromaxis']
offset = scene.module.offsetfromaxis
sceney = scene.module.sceney
scenex = scene.module.scenex
# x needed for sensorsx>1 case
x = scene.module.x
## Check for proper input variables in sceneDict
if 'pitch' in sceneDict:
pitch = sceneDict['pitch']
elif 'gcr' in sceneDict:
pitch = sceney / sceneDict['gcr']
else:
raise Exception("Error: no 'pitch' or 'gcr' passed in sceneDict" )
if 'axis_tilt' in sceneDict:
axis_tilt = sceneDict['axis_tilt']
else:
axis_tilt = 0
if hasattr(scene.module,'z'):
modulez = scene.module.z
else:
print ("Module's z not set on sceneDict internal dictionary. Setting to default")
modulez = 0.02
if frontsurfaceoffset is None:
frontsurfaceoffset = 0.001
if backsurfaceoffset is None:
backsurfaceoffset = 0.001
# The Sensor routine below needs a "hub-height", not a clearance height.
# The below complicated check checks to see if height (deprecated) is passed,
# and if clearance_height or hub_height is passed as well.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred = 'hub_height',
nonpreferred = 'clearance_height')
if use_clearanceheight :
height = sceneDict['clearance_height'] + 0.5* \
np.sin(abs(tilt) * np.pi / 180) * \
sceney - offset*np.sin(abs(tilt)*np.pi/180)
else:
height = sceneDict['hub_height']
if debug:
print("For debug:\n hub_height, Azimuth, Tilt, nMods, nRows, "
"Pitch, Offset, SceneY, SceneX")
print(height, azimuth, tilt, nMods, nRows,
pitch, offset, sceney, scenex)
if modWanted == 0:
print( " FYI Modules and Rows start at index 1. "
"Reindexing to modWanted 1" )
modWanted = modWanted+1 # otherwise it gives results on Space.
if rowWanted ==0:
print( " FYI Modules and Rows start at index 1. "
"Reindexing to rowWanted 1" )
rowWanted = rowWanted+1
if modWanted is None:
modWanted = round(nMods / 1.99)
if rowWanted is None:
rowWanted = round(nRows / 1.99)
if debug is True:
print( f"Sampling: modWanted {modWanted}, rowWanted {rowWanted} "
"out of {nMods} modules, {nRows} rows" )
x0 = (modWanted-1)*scenex - (scenex*(round(nMods/1.99)*1.0-1))
y0 = (rowWanted-1)*pitch - (pitch*(round(nRows / 1.99)*1.0-1))
x1 = x0 * np.cos ((180-azimuth)*dtor) - y0 * np.sin((180-azimuth)*dtor)
y1 = x0 * np.sin ((180-azimuth)*dtor) + y0 * np.cos((180-azimuth)*dtor)
z1 = 0
if axis_tilt != 0 and azimuth == 90:
print ("fixing height for axis_tilt")
z1 = (modWanted-1)*scenex * np.sin(axis_tilt*dtor)
# Edge of Panel
x2 = (sceney/2.0) * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
y2 = (sceney/2.0) * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
z2 = -(sceney/2.0) * np.sin(tilt*dtor)
# Axis of rotation Offset (if offset is not 0) for the front of the module
x3 = (offset + modulez + frontsurfaceoffset) * np.sin(tilt*dtor) * np.sin((azimuth)*dtor)
y3 = (offset + modulez + frontsurfaceoffset) * np.sin(tilt*dtor) * np.cos((azimuth)*dtor)
z3 = (offset + modulez + frontsurfaceoffset) * np.cos(tilt*dtor)
# Axis of rotation Offset, for the back of the module
x4 = (offset - backsurfaceoffset) * np.sin(tilt*dtor) * np.sin((azimuth)*dtor)
y4 = (offset - backsurfaceoffset) * np.sin(tilt*dtor) * np.cos((azimuth)*dtor)
z4 = (offset - backsurfaceoffset) * np.cos(tilt*dtor)
xstartfront = x1 + x2 + x3 + originx
xstartback = x1 + x2 + x4 + originx
ystartfront = y1 + y2 + y3 + originy
ystartback = y1 + y2 + y4 + originy
zstartfront = height + z1 + z2 + z3
zstartback = height + z1 + z2 + z4
#Adjust orientation of scan depending on tilt & azimuth
zdir = np.cos((tilt)*dtor)
ydir = np.sin((tilt)*dtor) * np.cos((azimuth)*dtor)
xdir = np.sin((tilt)*dtor) * np.sin((azimuth)*dtor)
front_orient = '%0.3f %0.3f %0.3f' % (-xdir, -ydir, -zdir)
back_orient = '%0.3f %0.3f %0.3f' % (xdir, ydir, zdir)
#IF cellmodule:
#TODO: Add check for sensorsx_back
#temp = scene.moduleDict.get('cellModule') #error-free way to query it
#if ((temp is not None) and
if ((getattr(scene.module, 'cellModule', None)) and
(sensorsy_back == scene.module.cellModule.numcellsy)):
ycell = scene.module.cellModule.ycell
xinc_back = -((sceney - ycell ) / (scene.module.cellModule.numcellsy-1)) * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
            yinc_back = -((sceney - ycell) / (scene.module.cellModule.numcellsy-1)) * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
"""
@package ion_functions.data.perf.test_adcp_performance
@file ion_functions/data/perf/test_adcp_performance.py
@author <NAME>
@brief Performance tests for flo_functions module
"""
import numpy as np
from nose.plugins.attrib import attr
from ion_functions.data.perf.test_performance import PerformanceTestCase
from ion_functions.data import flo_functions as fl
@attr('PERF', group='func')
class TestADCPPerformance(PerformanceTestCase):
def setUp(self):
# set test inputs
### optical backscatter ###
self.scat_counts = 55
self.scat_dark = 47
self.scat_scale = 3.058e-6
self.beta = fl.flo_beta(self.scat_counts, self.scat_dark, self.scat_scale)
self.degC = 20.0
self.psu = 32.0
### chla ###
self.chla_counts = 55
self.chla_dark = 45
self.chla_scale = 0.0121
### cdom ###
self.cdom_counts = 55
self.cdom_dark = 48
self.cdom_scale = 0.0848
def test_flo_bback_total(self):
stats = []
beta = np.repeat(self.beta, 1000000)
degC = np.repeat(self.degC, 1000000)
psu = np.repeat(self.psu, 1000000)
self.profile(stats, fl.flo_bback_total, beta, degC, psu)
def test_flo_beam(self):
stats = []
counts = np.repeat(self.scat_counts, 1000000)
dark = np.repeat(self.scat_dark, 1000000)
scale = np.repeat(self.scat_scale, 1000000)
self.profile(stats, fl.flo_beta, counts, dark, scale)
def test_flo_cdom(self):
stats = []
counts = np.repeat(self.cdom_counts, 1000000)
dark = np.repeat(self.cdom_dark, 1000000)
scale = np.repeat(self.cdom_scale, 1000000)
self.profile(stats, fl.flo_cdom, counts, dark, scale)
def test_flo_chla(self):
stats = []
        counts = np.repeat(self.chla_counts, 1000000)
        dark = np.repeat(self.chla_dark, 1000000)
        scale = np.repeat(self.chla_scale, 1000000)
        self.profile(stats, fl.flo_chla, counts, dark, scale)
import pandas as pd
import numpy as np
import json
import os
import csv
import sys
import warnings
from datetime import datetime
from math import floor
from sklearn.preprocessing import LabelBinarizer, LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from sklearn.metrics import log_loss, accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc, mean_squared_error, mean_absolute_error, r2_score
from sklearn.feature_extraction.text import CountVectorizer
import xgboost as xgb
def build_encoders(df):
"""Builds encoders for fields to be used when
processing data for the model.
All encoder specifications are stored in locally
in /encoders as .json files.
# Arguments
df: A pandas DataFrame containing the data.
"""
# Pclass
pclass_tf = df['Pclass'].values
pclass_encoder = LabelBinarizer()
pclass_encoder.fit(pclass_tf)
with open(os.path.join('encoders', 'pclass_encoder.json'),
'w', encoding='utf8') as outfile:
json.dump(pclass_encoder.classes_.tolist(),
outfile, ensure_ascii=False)
# Sex
sex_tf = df['Sex'].values
sex_encoder = LabelBinarizer()
sex_encoder.fit(sex_tf)
with open(os.path.join('encoders', 'sex_encoder.json'),
'w', encoding='utf8') as outfile:
json.dump(sex_encoder.classes_.tolist(), outfile, ensure_ascii=False)
# Age
age_enc = df['Age']
age_encoder = MinMaxScaler()
age_encoder_attrs = ['min_', 'scale_']
age_encoder.fit(df['Age'].values.reshape(-1, 1))
age_encoder_dict = {attr: getattr(age_encoder, attr).tolist()
for attr in age_encoder_attrs}
with open(os.path.join('encoders', 'age_encoder.json'),
'w', encoding='utf8') as outfile:
json.dump(age_encoder_dict, outfile, ensure_ascii=False)
# Siblings/Spouses Aboard
siblings_spouses_aboard_tf = df['Siblings/Spouses Aboard'].values
siblings_spouses_aboard_encoder = LabelBinarizer()
siblings_spouses_aboard_encoder.fit(siblings_spouses_aboard_tf)
with open(os.path.join('encoders', 'siblings_spouses_aboard_encoder.json'),
'w', encoding='utf8') as outfile:
json.dump(siblings_spouses_aboard_encoder.classes_.tolist(),
outfile, ensure_ascii=False)
# Parents/Children Aboard
parents_children_aboard_tf = df['Parents/Children Aboard'].values
parents_children_aboard_encoder = LabelBinarizer()
parents_children_aboard_encoder.fit(parents_children_aboard_tf)
with open(os.path.join('encoders', 'parents_children_aboard_encoder.json'),
'w', encoding='utf8') as outfile:
json.dump(parents_children_aboard_encoder.classes_.tolist(),
outfile, ensure_ascii=False)
# Fare
fare_enc = df['Fare']
fare_encoder = MinMaxScaler()
fare_encoder_attrs = ['min_', 'scale_']
fare_encoder.fit(df['Fare'].values.reshape(-1, 1))
fare_encoder_dict = {attr: getattr(fare_encoder, attr).tolist()
for attr in fare_encoder_attrs}
with open(os.path.join('encoders', 'fare_encoder.json'),
'w', encoding='utf8') as outfile:
json.dump(fare_encoder_dict, outfile, ensure_ascii=False)
# Target Field: Survived
survived_encoder = LabelEncoder()
survived_encoder.fit(df['Survived'].values)
with open(os.path.join('encoders', 'survived_encoder.json'),
'w', encoding='utf8') as outfile:
json.dump(survived_encoder.classes_.tolist(),
outfile, ensure_ascii=False)
def load_encoders():
"""Loads the encoders built during `build_encoders`.
# Returns
encoders: A dict of encoder objects/specs.
"""
encoders = {}
# Pclass
pclass_encoder = LabelBinarizer()
with open(os.path.join('encoders', 'pclass_encoder.json'),
'r', encoding='utf8', errors='ignore') as infile:
pclass_encoder.classes_ = json.load(infile)
encoders['pclass_encoder'] = pclass_encoder
# Sex
sex_encoder = LabelBinarizer()
with open(os.path.join('encoders', 'sex_encoder.json'),
'r', encoding='utf8', errors='ignore') as infile:
sex_encoder.classes_ = json.load(infile)
encoders['sex_encoder'] = sex_encoder
# Age
age_encoder = MinMaxScaler()
age_encoder_attrs = ['min_', 'scale_']
with open(os.path.join('encoders', 'age_encoder.json'),
'r', encoding='utf8', errors='ignore') as infile:
age_attrs = json.load(infile)
for attr, value in age_attrs.items():
setattr(age_encoder, attr, value)
encoders['age_encoder'] = age_encoder
# Siblings/Spouses Aboard
siblings_spouses_aboard_encoder = LabelBinarizer()
with open(os.path.join('encoders', 'siblings_spouses_aboard_encoder.json'),
'r', encoding='utf8', errors='ignore') as infile:
siblings_spouses_aboard_encoder.classes_ = json.load(infile)
encoders['siblings_spouses_aboard_encoder'] = siblings_spouses_aboard_encoder
# Parents/Children Aboard
parents_children_aboard_encoder = LabelBinarizer()
with open(os.path.join('encoders', 'parents_children_aboard_encoder.json'),
'r', encoding='utf8', errors='ignore') as infile:
parents_children_aboard_encoder.classes_ = json.load(infile)
encoders['parents_children_aboard_encoder'] = parents_children_aboard_encoder
# Fare
fare_encoder = MinMaxScaler()
fare_encoder_attrs = ['min_', 'scale_']
with open(os.path.join('encoders', 'fare_encoder.json'),
'r', encoding='utf8', errors='ignore') as infile:
fare_attrs = json.load(infile)
for attr, value in fare_attrs.items():
setattr(fare_encoder, attr, value)
encoders['fare_encoder'] = fare_encoder
# Target Field: Survived
survived_encoder = LabelEncoder()
with open(os.path.join('encoders', 'survived_encoder.json'),
'r', encoding='utf8', errors='ignore') as infile:
survived_encoder.classes_ = np.array(json.load(infile))
encoders['survived_encoder'] = survived_encoder
return encoders
def process_data(df, encoders, process_target=True):
"""Processes an input DataFrame into a format
    suitable for model prediction.
This function loads the encoder specifications created in
`build_encoders`.
# Arguments
df: a DataFrame containing the source data
encoders: a dict of encoders to process the data.
process_target: boolean to determine if the target should be encoded.
# Returns
A tuple: A list containing all the processed fields to be fed
into the model, and the processed target field.
"""
# Pclass
pclass_enc = df['Pclass'].values
pclass_enc = encoders['pclass_encoder'].transform(pclass_enc)
# Sex
sex_enc = df['Sex'].values
sex_enc = encoders['sex_encoder'].transform(sex_enc)
# Age
age_enc = df['Age'].values.reshape(-1, 1)
age_enc = encoders['age_encoder'].transform(age_enc)
# Siblings/Spouses Aboard
siblings_spouses_aboard_enc = df['Siblings/Spouses Aboard'].values
siblings_spouses_aboard_enc = encoders['siblings_spouses_aboard_encoder'].transform(
siblings_spouses_aboard_enc)
# Parents/Children Aboard
parents_children_aboard_enc = df['Parents/Children Aboard'].values
parents_children_aboard_enc = encoders['parents_children_aboard_encoder'].transform(
parents_children_aboard_enc)
# Fare
fare_enc = df['Fare'].values.reshape(-1, 1)
fare_enc = encoders['fare_encoder'].transform(fare_enc)
data_enc = [pclass_enc,
sex_enc,
age_enc,
siblings_spouses_aboard_enc,
parents_children_aboard_enc,
fare_enc
]
if process_target:
# Target Field: Survived
survived_enc = df['Survived'].values
survived_enc = encoders['survived_encoder'].transform(survived_enc)
return (data_enc, survived_enc)
return data_enc
def model_predict(df, model, encoders):
"""Generates predictions for a trained model.
# Arguments
df: A pandas DataFrame containing the source data.
model: A compiled model.
encoders: a dict of encoders to process the data.
# Returns
A numpy array of predictions.
"""
data_enc = process_data(df, encoders, process_target=False)
    data_enc = xgb.DMatrix(np.hstack(data_enc))
    return model.predict(data_enc)
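# Hedged end-to-end usage sketch (file and variable names are assumptions,
# not part of this script):
#   df = pd.read_csv('titanic.csv')
#   build_encoders(df)
#   encoders = load_encoders()
#   X, y = process_data(df, encoders)
#   bst = xgb.train({'objective': 'binary:logistic'},
#                   xgb.DMatrix(np.hstack(X), label=y))
#   preds = model_predict(df, bst, encoders)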
from hobart import faces_intersecting_plane, intersect_mesh_with_plane
import numpy as np
from polliwog import Plane, Polyline
from vg.compat import v2 as vg
box_vertices = np.array(
[
[0.5, -0.5, 0.5, -0.5, 0.5, -0.5, 0.5, -0.5],
[0.5, 0.5, -0.5, -0.5, 0.5, 0.5, -0.5, -0.5],
[0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5],
]
).T
box_faces = np.array(
[
[0, 1, 2],
[3, 2, 1],
[0, 2, 4],
[6, 4, 2],
[0, 4, 1],
[5, 1, 4],
[7, 5, 6],
[4, 6, 5],
[7, 6, 3],
[2, 3, 6],
[7, 3, 5],
[1, 5, 3],
]
)
double_box_vertices = np.vstack(
[box_vertices, np.array([2.0, 0.0, 0.0]) + box_vertices]
)
double_box_faces = np.vstack([box_faces, len(box_vertices) + box_faces])
non_intersecting_plane = Plane(np.array([0.0, 5.0, 0.0]), vg.basis.y)
def create_open_box():
v_on_faces_to_remove = np.nonzero(box_vertices[:, 0] < 0.0)[0]
faces_to_remove = np.all(
np.in1d(box_faces.ravel(), v_on_faces_to_remove).reshape((-1, 3)), axis=1
)
return box_faces[~faces_to_remove]
open_box_faces = create_open_box()
double_open_box_faces = np.vstack([open_box_faces, len(box_vertices) + open_box_faces])
def test_intersection():
# Verify that we're finding the correct number of faces to start with.
assert (
np.count_nonzero(faces_intersecting_plane(box_vertices, box_faces, Plane.xz))
== 8
)
xss = intersect_mesh_with_plane(box_vertices, box_faces, Plane.xz)
assert isinstance(xss, list)
assert len(xss) == 1
(xs,) = xss
assert xs.num_v == 8
assert xs.is_closed is True
assert xs.total_length == 4.0
np.testing.assert_array_equal(xs.v[:, 1], np.zeros(8))
for a, b in zip(xs.v[0:-1, [0, 2]], xs.v[1:, [0, 2]]):
# Each line changes only one coordinate, and is 0.5 long.
assert np.sum(a == b) == 1
assert np.linalg.norm(a - b) == 0.5
def test_intersection_with_ret_pointcloud():
(xs,) = intersect_mesh_with_plane(box_vertices, box_faces, Plane.xz)
pointcloud = intersect_mesh_with_plane(
box_vertices, box_faces, Plane.xz, ret_pointcloud=True
)
assert isinstance(pointcloud, np.ndarray)
np.testing.assert_array_equal(pointcloud, xs.v)
def test_no_intersection():
np.testing.assert_array_equal(
faces_intersecting_plane(box_vertices, box_faces, non_intersecting_plane),
np.zeros(12),
)
xss = intersect_mesh_with_plane(box_vertices, box_faces, non_intersecting_plane)
assert isinstance(xss, list)
assert xss == []
# TODO: Verify that we're detecting faces that lay entirely in the plane as
# potential intersections.
def test_no_intersection_with_neighborhood():
neighborhood = np.zeros((1, 3))
xs = intersect_mesh_with_plane(
box_vertices, box_faces, non_intersecting_plane, neighborhood=neighborhood
)
assert xs is None
def test_no_intersection_with_ret_pointcloud():
pointcloud = intersect_mesh_with_plane(
box_vertices, box_faces, non_intersecting_plane, ret_pointcloud=True
)
assert isinstance(pointcloud, np.ndarray)
assert pointcloud.shape == (0, 3)
def test_intersection_wth_two_components():
xss = intersect_mesh_with_plane(double_box_vertices, double_box_faces, Plane.xz)
assert isinstance(xss, list)
assert len(xss) == 2
first, second = xss
assert first.num_v == 8
assert second.num_v == 8
assert first.is_closed is True
assert second.is_closed is True
def test_intersection_wth_neighborhood():
neighborhood = np.zeros((1, 3))
xs = intersect_mesh_with_plane(
double_box_vertices, double_box_faces, Plane.xz, neighborhood=neighborhood
)
assert isinstance(xs, Polyline)
assert len(xs) == 8
assert xs.is_closed is True
def test_intersection_with_neighborhood_and_ret_pointcloud():
neighborhood = np.zeros((1, 3))
xs = intersect_mesh_with_plane(
double_box_vertices, double_box_faces, Plane.xz, neighborhood=neighborhood
)
pointcloud = intersect_mesh_with_plane(
double_box_vertices,
double_box_faces,
Plane.xz,
neighborhood=neighborhood,
ret_pointcloud=True,
)
assert isinstance(pointcloud, np.ndarray)
np.testing.assert_array_equal(pointcloud, xs.v)
def test_intersection_with_non_watertight_mesh():
xss = intersect_mesh_with_plane(box_vertices, open_box_faces, Plane.xz)
assert isinstance(xss, list)
assert len(xss) == 1
(xs,) = xss
# The removed side is not in the xsection.
assert not np.any(np.all(xs.v == [-0.5, 0.0, 0.0]))
assert xs.num_v == 7
assert xs.is_closed is False
assert xs.total_length == 3.0
np.testing.assert_array_equal(xs.v[:, 1], np.zeros(7))
for a, b in zip(xs.v[0:-1, [0, 2]], xs.v[1:, [0, 2]]):
# Each line changes only one coordinate, and is 0.5 long.
assert np.sum(a == b) == 1
assert np.linalg.norm(a - b) == 0.5
def test_intersection_with_mulitple_non_watertight_meshes():
xss = intersect_mesh_with_plane(
double_box_vertices, double_open_box_faces, Plane.xz
)
assert isinstance(xss, list)
assert len(xss) == 2
first, second = xss
assert first.num_v == 7
assert second.num_v == 7
assert first.is_closed is False
assert second.is_closed is False
# The removed side is not in either xsection.
assert not np.any(np.all(first.v == [-0.5, 0.0, 0.0]))
assert not np.any(np.all(second.v == [-0.5, 0.0, 0.0]))
assert first.total_length == 3.0
assert second.total_length == 3.0
np.testing.assert_array_equal(first.v[:, 1], np.zeros(7))
np.testing.assert_array_equal(second.v[:, 1], np.zeros(7))
for xs in xss:
for a, b in zip(xs.v[0:-1, [0, 2]], xs.v[1:, [0, 2]]):
# Each line changes only one coordinate, and is 0.5 long.
            assert np.sum(a == b) == 1
            assert np.linalg.norm(a - b) == 0.5
import numpy as np
import copy
from itertools import combinations
from scipy.optimize import minimize, Bounds
from scipy.spatial.distance import cdist
from functools import partial
from scipy.linalg import solve_triangular
from scipy.special import kv, gamma
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, RBF, Product, Sum, \
ConstantKernel, WhiteKernel
from pyapprox import get_univariate_quadrature_rules_from_variable
from pyapprox.utilities import cartesian_product, outer_product, \
cholesky_solve_linear_system
from pyapprox.low_discrepancy_sequences import transformed_halton_sequence
from pyapprox.utilities import pivoted_cholesky_decomposition, \
continue_pivoted_cholesky_decomposition
from pyapprox.variables import IndependentMultivariateRandomVariable
from pyapprox.variable_transformations import \
AffineRandomVariableTransformation
from pyapprox.indexing import argsort_indices_leixographically
from pyapprox.probability_measure_sampling import \
generate_independent_random_samples
class GaussianProcess(GaussianProcessRegressor):
def set_variable_transformation(self, var_trans):
self.var_trans = var_trans
def map_to_canonical_space(self, samples):
if hasattr(self, 'var_trans'):
return self.var_trans.map_to_canonical_space(samples)
return samples
def map_from_canonical_space(self, canonical_samples):
if hasattr(self, 'var_trans'):
return self.var_trans.map_from_canonical_space(canonical_samples)
return canonical_samples
def fit(self, train_samples, train_values):
r"""
A light weight wrapper of sklearn GaussianProcessRegressor.fit
function. See sklearn documentation for more info. This wrapper
is needed because sklearn stores a unique sample in each row
of a samples matrix whereas pyapprox uses the transpose.
Parameters
----------
samples : np.ndarray (nvars,nsamples)
Samples at which to evaluate the GP. Sklearn requires the
transpose of this matrix, i.e a matrix with size (nsamples,nvars)
"""
canonical_train_samples = self.map_to_canonical_space(train_samples)
return super().fit(canonical_train_samples.T, train_values)
def __call__(self, samples, return_std=False, return_cov=False):
r"""
A light weight wrapper of sklearn GaussianProcessRegressor.predict
function. See sklearn documentation for more info. This wrapper
is needed because sklearn stores a unique sample in each row
of a samples matrix whereas pyapprox uses the transpose.
Parameters
----------
samples : np.ndarray (nvars,nsamples)
Samples at which to evaluate the GP. Sklearn requires the
transpose of this matrix, i.e a matrix with size (nsamples,nvars)
"""
canonical_samples = self.map_to_canonical_space(samples)
result = self.predict(canonical_samples.T, return_std, return_cov)
if type(result) == tuple:
# when returning prior stdev covariance then must reshape vals
if result[0].ndim == 1:
result = [result[0][:, None]] + [r for r in result[1:]]
result = tuple(result)
return result
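        # Hedged usage sketch (variable names are assumptions), showing the
        # (nvars, nsamples) sample convention handled by this wrapper:
        #   gp = GaussianProcess(kernel=RBF(0.5))
        #   gp.fit(train_samples, train_vals)        # train_samples: (nvars, ntrain)
        #   mean, std = gp(samples, return_std=True) # samples: (nvars, nsamples)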
def predict_random_realization(self, samples, rand_noise=1,
truncated_svd=None, keep_normalized=False):
"""
Predict values of a random realization of the Gaussian process
Notes
-----
        A different realization will be returned for two different sample sets
        even if the same random noise is used. To see this for a 1D GP use:
xx = np.linspace(0, 1, 101)
rand_noise = np.random.normal(0, 1, (xx.shape[0], 1))
yy = gp.predict_random_realization(xx[None, :], rand_noise)
plt.plot(xx, yy)
xx = np.linspace(0, 1, 97)
rand_noise = np.random.normal(0, 1, (xx.shape[0], 1))
yy = gp.predict_random_realization(xx[None, :], rand_noise)
plt.plot(xx, yy)
plt.show()
Parameters
----------
truncated_svd : dictionary
Dictionary containing the following attribues needed to define
a truncated singular values decomposition. If None then
factor the entire matrix
nsingular_vals : integer
Only compute the first n singular values when
factorizing the covariance matrix. n=truncated_svd
tol : float
The contribution to total variance from the truncated singular
values must not exceed this value.
Notes
-----
This function replaces
gp.sample_y(samples.T, n_samples=rand_noise, random_state=0)
which cannot be passed rand_noise vectors and cannot use truncated SVD
"""
# mapping of samples is performed in __call__
mean, cov = self(samples, return_cov=True)
if keep_normalized is True:
mean = (mean - self._y_train_mean) / self._y_train_std
cov /= self._y_train_std**2
# Use SVD because it is more robust than Cholesky
# L = np.linalg.cholesky(cov)
if truncated_svd is None:
U, S, V = np.linalg.svd(cov)
else:
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(
n_components=min(samples.shape[1]-1,
truncated_svd['nsingular_vals']), n_iter=7)
svd.fit(cov)
U = svd.components_.T
S = svd.singular_values_
print('Explained variance', svd.explained_variance_ratio_.sum())
assert svd.explained_variance_ratio_.sum() >= truncated_svd['tol']
# print(S.shape, cov.shape)
L = U*np.sqrt(S)
# create nsamples x nvars then transpose so same samples
# are produced if this function is called repeatedly with nsamples=1
if np.isscalar(rand_noise):
rand_noise = np.random.normal(0, 1, (rand_noise, mean.shape[0])).T
else:
assert rand_noise.shape[0] == mean.shape[0]
if truncated_svd is not None:
rand_noise = rand_noise[:S.shape[0], :]
vals = mean + L.dot(rand_noise)
return vals
def num_training_samples(self):
return self.X_train_.shape[0]
def condition_number(self):
return np.linalg.cond(self.L_.dot(self.L_.T))
def get_training_samples(self):
if hasattr(self, "var_trans") and self.var_trans is not None:
return self.var_trans.map_from_canonical_space(self.X_train_.T)
else:
return self.X_train_.T
class RandomGaussianProcessRealizations:
"""
Light weight wrapper that allows random realizations of a Gaussian process
to be evaluated at an arbitrary set of samples.
GaussianProcess.predict_random_realization can only evaluate the GP
at a finite set of samples. This wrapper can only compute the mean
interpolant as we assume that the number of training samples
was sufficient to produce an approximation with accuracy (samll pointwise
variance acceptable to the user. Unlike GaussianProcess predictions
can return a np.ndarray (nsamples, nrandom_realizations)
instead of size (nsamples, 1) where nrandom_realizations is the number
of random realizations interpolated
Parameters
----------
nvalidation_samples : integer
The number of samples of the random realization used to compute the
accuracy of the interpolant.
"""
def __init__(self, gp, use_cholesky=False, alpha=0):
self.gp = gp
kernel_types = [RBF, Matern]
# ignore white noise kernel as we want to interpolate the data
self.kernel = extract_covariance_kernel(gp.kernel_, kernel_types)
constant_kernel = extract_covariance_kernel(
gp.kernel_, [ConstantKernel])
if constant_kernel is not None:
self.kernel = constant_kernel*self.kernel
self.use_cholesky = use_cholesky
# it is useful to specify alpha different to the one use to invert
# Kernel marix at training data of gp
self.alpha = alpha
def fit(self, candidate_samples, rand_noise=None,
ninterpolation_samples=500, nvalidation_samples=100):
"""
Construct interpolants of random realizations evaluated at the
training data and at a new set of additional points
"""
assert (ninterpolation_samples <=
candidate_samples.shape[1] + self.gp.X_train_.T.shape[1]), (
ninterpolation_samples,
candidate_samples.shape[1] + self.gp.X_train_.T.shape[1])
canonical_candidate_samples = self.gp.map_to_canonical_space(
candidate_samples)
canonical_candidate_samples = np.hstack(
(self.gp.X_train_.T, canonical_candidate_samples))
if self.use_cholesky is True:
Kmatrix = self.kernel(canonical_candidate_samples.T)
Kmatrix[np.diag_indices_from(Kmatrix)] += self.alpha
init_pivots = np.arange(self.gp.X_train_.T.shape[1])
# init_pivots = None
L, pivots, error, chol_flag = pivoted_cholesky_decomposition(
Kmatrix, ninterpolation_samples,
init_pivots=init_pivots, pivot_weights=None,
error_on_small_tol=False, return_full=False, econ=True)
if chol_flag > 0:
pivots = pivots[:-1]
msg = "Number of samples used for interpolation "
msg += f"{pivots.shape[0]} "
msg += f"was less than requested {ninterpolation_samples}"
print(msg)
# then not all points requested were selected
# because L became illconditioned. This usually means that no
# more candidate samples are useful and that error in
# interpolant will be small. Note chol_flag > 0 even when
# pivots.shape[0] == ninterpolation_samples. This means last
# step of cholesky factorization triggered the incomplete flag
self.L = L[pivots, :pivots.shape[0]]
# print('Condition Number', np.linalg.cond(L.dot(L.T)))
self.selected_canonical_samples = \
canonical_candidate_samples[:, pivots]
mask = np.ones(canonical_candidate_samples.shape[1], dtype=bool)
mask[pivots] = False
canonical_validation_samples = canonical_candidate_samples[
:, mask]
self.canonical_validation_samples = \
canonical_validation_samples[:, :nvalidation_samples]
else:
assert (ninterpolation_samples + nvalidation_samples <=
candidate_samples.shape[1])
self.selected_canonical_samples = \
canonical_candidate_samples[:, :ninterpolation_samples]
self.canonical_validation_samples = \
canonical_candidate_samples[:, ninterpolation_samples:ninterpolation_samples+nvalidation_samples]
Kmatrix = self.kernel(self.selected_canonical_samples.T)
Kmatrix[np.diag_indices_from(Kmatrix)] += self.alpha
self.L = np.linalg.cholesky(Kmatrix)
samples = np.hstack(
(self.selected_canonical_samples,
self.canonical_validation_samples))
# make last sample mean of gaussian process
rand_noise = rand_noise[:samples.shape[1], :]
rand_noise[:, -1] = np.zeros((rand_noise.shape[0]))
vals = self.gp.predict_random_realization(
self.gp.map_from_canonical_space(samples),
rand_noise=rand_noise, truncated_svd=None,
keep_normalized=True)
self.train_vals = vals[:self.selected_canonical_samples.shape[1]]
self.validation_vals = vals[self.selected_canonical_samples.shape[1]:]
# Entries of the following should be size of alpha when
# rand_noise[:, -1] = np.zeros((rand_noise.shape[0]))
# print(self.train_vals[:, -1]-self.gp.y_train_[:, 0])
# L_inv = np.linalg.inv(L.T)
# L_inv = solve_triangular(L.T, np.eye(L.shape[0]))
# self.K_inv_ = L_inv.dot(L_inv.T)
# self.alpha_ = self.K_inv_.dot(self.train_vals)
tmp = solve_triangular(self.L, self.train_vals, lower=True)
self.alpha_ = solve_triangular(self.L.T, tmp, lower=False)
approx_validation_vals = self.kernel(
self.canonical_validation_samples.T,
self.selected_canonical_samples.T).dot(self.alpha_)
error = np.linalg.norm(
approx_validation_vals-self.validation_vals, axis=0)/(
np.linalg.norm(self.validation_vals, axis=0))
# Error in interpolation of gp mean when
# rand_noise[:, -1] = np.zeros((rand_noise.shape[0]))
# print(np.linalg.norm((approx_validation_vals[:, -1]*self.gp._y_train_std+self.gp._y_train_mean)-self.gp(self.canonical_validation_samples)[:, 0])/np.linalg.norm(self.gp(self.canonical_validation_samples)[:, 0]))
print('Worst case relative interpolation error', error.max())
print('Median relative interpolation error', np.median(error))
def __call__(self, samples):
canonical_samples = self.gp.map_to_canonical_space(samples)
K_pred = self.kernel(
canonical_samples.T, self.selected_canonical_samples.T)
vals = K_pred.dot(self.alpha_)
vals = self.gp._y_train_std*vals + self.gp._y_train_mean
return vals
class AdaptiveGaussianProcess(GaussianProcess):
def setup(self, func, sampler):
self.func = func
self.sampler = sampler
def refine(self, num_samples):
# new samples must be in user domain
new_samples, chol_flag = self.sampler(num_samples)
new_values = self.func(new_samples)
assert new_values.shape[1] == 1 # must be scalar values QoI
if hasattr(self, 'X_train_'):
# get_training_samples returns samples in user space
train_samples = self.get_training_samples()
train_samples = np.hstack([train_samples, new_samples])
train_values = np.vstack([self.y_train_, new_values])
else:
train_samples, train_values = new_samples, new_values
# if self.var_trans is not None then when fit is called
        # train_samples are mapped to the canonical domain
self.fit(train_samples, train_values)
return chol_flag
def is_covariance_kernel(kernel, kernel_types):
return (type(kernel) in kernel_types)
def extract_covariance_kernel(kernel, kernel_types):
cov_kernel = None
if is_covariance_kernel(kernel, kernel_types):
return copy.deepcopy(kernel)
if type(kernel) == Product or type(kernel) == Sum:
cov_kernel = extract_covariance_kernel(kernel.k1, kernel_types)
if cov_kernel is None:
cov_kernel = extract_covariance_kernel(kernel.k2, kernel_types)
return copy.deepcopy(cov_kernel)
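# Illustrative usage sketch (not part of the original module): how
# extract_covariance_kernel recurses through scikit-learn Product/Sum kernels.
# The composite kernel below is an assumption chosen only for demonstration.
def _example_extract_covariance_kernel():
    from sklearn.gaussian_process.kernels import RBF, ConstantKernel
    # ConstantKernel*RBF builds a Product kernel; the RBF factor is returned
    # as a deep copy so modifying it does not change the original kernel
    kernel = ConstantKernel(2.0)*RBF(length_scale=0.5)
    rbf = extract_covariance_kernel(kernel, [RBF])
    assert isinstance(rbf, RBF) and rbf.length_scale == 0.5
    return rbf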
def gaussian_tau(train_samples, delta, mu, sigma):
dists = (train_samples-mu)**2
return np.prod(np.sqrt(delta/(delta+2*sigma**2))*np.exp(
-(dists)/(delta+2*sigma**2)), axis=0)
def gaussian_u(delta, sigma):
return np.sqrt(delta/(delta+4*sigma**2)).prod()
def gaussian_P(train_samples, delta, mu, sigma):
nvars, ntrain_samples = train_samples.shape
P = np.ones((ntrain_samples, ntrain_samples))
for ii in range(nvars):
si, mi, di = sigma[ii, 0], mu[ii, 0], delta[ii, 0]
denom1 = 4*(di+4*si**2)
term2 = np.sqrt(di/(di+4*si**2))
for mm in range(ntrain_samples):
xm = train_samples[ii, mm]
xn = train_samples[ii, mm:]
P[mm, mm:] *= np.exp(-1/(2*si**2*di)*(
2*si**2*(xm**2+xn**2)+di*mi**2 -
(4*si**2*(xm+xn)+2*di*mi)**2/denom1))*term2
P[mm:, mm] = P[mm, mm:]
return P
def gaussian_nu(delta, sigma):
return np.sqrt(delta/(delta+8.*sigma**2)).prod()
def gaussian_Pi(train_samples, delta, mu, sigma):
nvars, ntrain_samples = train_samples.shape
Pi = np.ones((ntrain_samples, ntrain_samples))
for ii in range(nvars):
si, mi, di = sigma[ii, 0], mu[ii, 0], delta[ii, 0]
denom1 = (12*si**4+8*di*si**2+di**2)
denom2, denom3 = (di+2*si**2), (di+6*si**2)
for mm in range(ntrain_samples):
xm = train_samples[ii, mm]
xn = train_samples[ii, mm:]
t1 = 2*(xm-xn)**2/di+3*(-2*mi+xm+xn)**2/denom2+(xm-xn)**2/denom3
Pi[mm, mm:] *= np.exp(-t1/6)*np.sqrt(di**2/(denom1))
Pi[mm:, mm] = Pi[mm, mm:]
return Pi
def compute_v_sq(A_inv, P):
# v_sq = 1-np.trace(A_inv.dot(P))
v_sq = (1-np.sum(A_inv*P))
return v_sq
def compute_zeta(y, A_inv, P):
return y.T.dot(A_inv.dot(P).dot(A_inv)).dot(y)
def compute_zeta_econ(y, A_inv_y, A_inv_P):
return y.T.dot(A_inv_P.dot(A_inv_y))
def compute_varpi(tau, A_inv):
return tau.T.dot(A_inv).dot(tau)
def compute_varsigma_sq(u, varpi):
return u-varpi
def compute_varphi(A_inv, P):
tmp = A_inv.dot(P)
varphi = np.sum(tmp.T*tmp)
return varphi
def compute_varphi_econ(A_inv_P):
varphi = np.sum(A_inv_P.T*A_inv_P)
return varphi
def compute_psi(A_inv, Pi):
return np.sum(A_inv.T*Pi)
def compute_chi(nu, varphi, psi):
return nu+varphi-2*psi
def compute_phi(train_vals, A_inv, Pi, P):
return train_vals.T.dot(A_inv).dot(Pi).dot(A_inv).dot(train_vals) -\
train_vals.T.dot(A_inv).dot(P).dot(A_inv).dot(P).dot(A_inv).dot(
train_vals)
def compute_phi_econ(A_inv_y, A_inv_P, Pi, P):
return A_inv_y.T.dot(Pi.dot(A_inv_y))-A_inv_y.T.dot(
P.dot(A_inv_P.dot(A_inv_y)))
def compute_varrho(lamda, A_inv, train_vals, P, tau):
return lamda.T.dot(A_inv.dot(train_vals)) - tau.T.dot(
A_inv.dot(P).dot(A_inv.dot(train_vals)))
def compute_varrho_econ(lamda, A_inv_y, A_inv_P, tau):
return lamda.T.dot(A_inv_y) - tau.T.dot(A_inv_P.dot(A_inv_y))
def compute_xi(xi_1, lamda, tau, P, A_inv):
return xi_1+tau.dot(A_inv).dot(P).dot(A_inv).dot(tau) -\
2*lamda.dot(A_inv).dot(tau)
def compute_xi_econ(xi_1, lamda, tau, A_inv_P, A_inv_tau):
return xi_1+tau.dot(A_inv_P.dot(A_inv_tau)) -\
2*lamda.dot(A_inv_tau)
def compute_var_of_var_term1(phi, kernel_var, chi, zeta, v_sq):
# E[ I_2^2] (term1)
return 4*phi*kernel_var + 2*chi*kernel_var**2+(
zeta+v_sq*kernel_var)**2
def compute_var_of_var_term2(eta, varrho, kernel_var, xi, zeta, v_sq,
varsigma_sq):
# -2E[I_2I^2] (term2)
return 4*eta*varrho*kernel_var+2*xi*kernel_var**2 +\
zeta*varsigma_sq*kernel_var+v_sq*varsigma_sq*kernel_var**2 +\
zeta*eta**2+eta**2*v_sq*kernel_var
def compute_var_of_var_term3(varsigma_sq, kernel_var, eta, v_sq):
# E[I^4]
return 3*varsigma_sq**2*kernel_var**2+6*eta**2*varsigma_sq*kernel_var +\
eta**4
def gaussian_lamda(train_samples, delta, mu, sigma):
nvars = train_samples.shape[0]
lamda = 1
for ii in range(nvars):
xxi, si = train_samples[ii, :], sigma[ii, 0]
mi, di = mu[ii, 0], delta[ii, 0]
denom1 = 4*si**4+6*di*si**2+di**2
t1 = (di+4*si**2)/denom1*(mi-xxi)**2
lamda *= di/np.sqrt(denom1)*np.exp(-t1)
return lamda
def gaussian_xi_1(delta, sigma):
return (delta/np.sqrt((delta+2*sigma**2)*(delta+6*sigma**2))).prod()
def variance_of_mean(kernel_var, varsigma_sq):
return kernel_var*varsigma_sq
def mean_of_variance(zeta, v_sq, kernel_var, expected_random_mean,
variance_random_mean):
return zeta+v_sq*kernel_var-expected_random_mean**2-variance_random_mean
def extract_gaussian_process_attributes_for_integration(gp):
if extract_covariance_kernel(gp.kernel_, [WhiteKernel]) is not None:
raise Exception('kernels with noise not supported')
kernel_types = [
RBF, Matern, UnivariateMarginalizedSquaredExponentialKernel]
kernel = extract_covariance_kernel(gp.kernel_, kernel_types)
constant_kernel = extract_covariance_kernel(gp.kernel_, [ConstantKernel])
if constant_kernel is not None:
kernel_var = constant_kernel.constant_value
else:
kernel_var = 1
if (not type(kernel) == RBF and not
(type(kernel) == Matern and not np.isfinite(kernel.nu)) and not
(type(kernel) == UnivariateMarginalizedSquaredExponentialKernel)):
# Squared exponential kernel
msg = f'GP Kernel type: {type(kernel)} '
msg += 'Only squared exponential kernel supported'
raise Exception(msg)
if not hasattr(gp, '_K_inv') or gp._K_inv is None:
# scikit-learn < 0.24.2 has _K_inv
# scikit-learn >= 0.24.2 does not
L_inv = solve_triangular(gp.L_.T, np.eye(gp.L_.shape[0]), lower=False)
K_inv = L_inv.dot(L_inv.T)
else:
K_inv = gp._K_inv.copy()
transform_quad_rules = (not hasattr(gp, 'var_trans'))
# gp.X_train_ will already be in the canonical space if var_trans is used
x_train = gp.X_train_.T
# correct for normalization of gaussian process training data
# gp.y_train_ is normalized such that
# y_train = gp._y_train_std*gp.y_train_ + gp._y_train_mean
# shift must be accounted for in integration so do not add here
y_train = gp._y_train_std*gp.y_train_
kernel_var *= float(gp._y_train_std**2)
K_inv /= gp._y_train_std**2
return x_train, y_train, K_inv, kernel.length_scale, \
kernel_var, transform_quad_rules
def integrate_gaussian_process(gp, variable, return_full=False,
nquad_samples=50):
"""
The alpha regularization parameter used to construct the gp stored
in gp.alpha can significantly impact condition number of A_inv
and thus the accuracy that can be obtained in estimates of integrals
particularly associated with variance. However setting alpha too large
will also limit the accuracy that can be achieved
"""
x_train, y_train, K_inv, kernel_length_scale, kernel_var, \
transform_quad_rules = \
extract_gaussian_process_attributes_for_integration(gp)
result = integrate_gaussian_process_squared_exponential_kernel(
x_train, y_train, K_inv, kernel_length_scale,
kernel_var, variable, return_full, transform_quad_rules,
nquad_samples, gp._y_train_mean)
expected_random_mean, variance_random_mean, expected_random_var, \
variance_random_var = result[:4]
if return_full is True:
return expected_random_mean, variance_random_mean, \
expected_random_var, variance_random_var, result[4]
return expected_random_mean, variance_random_mean, \
expected_random_var, variance_random_var
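# Hedged usage sketch (assumptions: `gp` is a GaussianProcess from this
# package that has already been fit with a squared exponential kernel, and
# `variable` is the IndependentMultivariateRandomVariable describing the
# inputs). The four returned statistics characterise the GP-induced estimates
# of the mean and variance of the underlying function:
#
#     expected_random_mean, variance_random_mean, expected_random_var, \
#         variance_random_var = integrate_gaussian_process(
#             gp, variable, nquad_samples=100)
#     # expected_random_mean +/- np.sqrt(variance_random_mean) summarises the
#     # uncertainty in the GP estimate of E[f]; the last two quantities play
#     # the same role for Var[f]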
def integrate_tau_P(xx_1d, ww_1d, xtr, lscale_ii):
dist_func = partial(cdist, metric='sqeuclidean')
dists_1d_x1_xtr = dist_func(
xx_1d[:, np.newaxis]/lscale_ii, xtr.T/lscale_ii)
K = np.exp(-.5*dists_1d_x1_xtr)
tau = ww_1d.dot(K)
P = K.T.dot(ww_1d[:, np.newaxis]*K)
return tau, P
def integrate_u_lamda_Pi_nu(xx_1d, ww_1d, xtr, lscale_ii):
# Get 2D tensor product quadrature rule
xx_2d = cartesian_product([xx_1d]*2)
ww_2d = outer_product([ww_1d]*2)
dists_2d_x1_x2 = (xx_2d[0, :].T/lscale_ii-xx_2d[1, :].T/lscale_ii)**2
K = np.exp(-.5*dists_2d_x1_x2)
u = ww_2d.dot(K)
dist_func = partial(cdist, metric='sqeuclidean')
dists_2d_x1_x2 = (xx_2d[0:1, :].T/lscale_ii-xx_2d[1:2, :].T/lscale_ii)**2
dists_2d_x2_xtr = dist_func(xx_2d[1:2, :].T/lscale_ii, xtr.T/lscale_ii)
lamda = np.exp(-.5*dists_2d_x1_x2.T-.5*dists_2d_x2_xtr.T).dot(ww_2d)
dists_2d_x1_xtr = dist_func(xx_2d[0:1, :].T/lscale_ii, xtr.T/lscale_ii)
# ntrain_samples = xtr.shape[1]
# Pi = np.empty((ntrain_samples, ntrain_samples))
# for mm in range(ntrain_samples):
# dists1=dists_2d_x1_xtr[:, mm:mm+1]
# Pi[mm, mm:]= np.exp(
# -.5*dists1-.5*dists_2d_x1_x2-.5*dists_2d_x2_xtr[:, mm:]).T.dot(
# ww_2d)
# Pi[mm:, mm] = Pi[mm, mm:]
w = np.exp(-.5*dists_2d_x1_x2[:, 0])*ww_2d
Pi = np.exp(-.5*dists_2d_x1_xtr).T.dot(w[:, np.newaxis]*np.exp(
-.5*dists_2d_x2_xtr))
nu = np.exp(-dists_2d_x1_x2)[:, 0].dot(ww_2d)
return u, lamda, Pi, nu
def integrate_xi_1(xx_1d, ww_1d, lscale_ii):
xx_3d = cartesian_product([xx_1d]*3)
ww_3d = outer_product([ww_1d]*3)
dists_3d_x1_x2 = (xx_3d[0, :]/lscale_ii-xx_3d[1, :]/lscale_ii)**2
dists_3d_x2_x3 = (xx_3d[1, :]/lscale_ii-xx_3d[2, :]/lscale_ii)**2
xi_1 = np.exp(-.5*dists_3d_x1_x2-.5*dists_3d_x2_x3).dot(ww_3d)
return xi_1
def get_gaussian_process_squared_exponential_kernel_1d_integrals(
X_train, length_scale, variable, transform_quad_rules,
nquad_samples=50, skip_xi_1=False):
nvars = variable.num_vars()
degrees = [nquad_samples]*nvars
univariate_quad_rules, pce = get_univariate_quadrature_rules_from_variable(
variable, degrees)
lscale = np.atleast_1d(length_scale)
# tau, u = 1, 1
# ntrain_samples = X_train.shape[1]
# P = np.ones((ntrain_samples, ntrain_samples))
# lamda = np.ones(ntrain_samples)
# Pi = np.ones((ntrain_samples, ntrain_samples))
# xi_1, nu = 1, 1
tau_list, P_list, u_list, lamda_list = [], [], [], []
Pi_list, nu_list, xi_1_list = [], [], []
for ii in range(nvars):
        # TODO only compute quadrature once for each unique quadrature rule.
        # But most quantities must be computed for all dimensions because
        # the distances depend on both the dimension-dependent length scale
        # and the training sample values.
        # Others, like u, only need to be computed once for each unique
        # quadrature rule and raised to the power equal to the number of
        # instances of that rule
# Define distance function
# dist_func = partial(cdist, metric='sqeuclidean')
# Training samples of ith variable
xtr = X_train[ii:ii+1, :]
# Get 1D quadrature rule
xx_1d, ww_1d = univariate_quad_rules[ii](degrees[ii]+1)
if transform_quad_rules is True:
jj = pce.basis_type_index_map[ii]
loc, scale = pce.var_trans.scale_parameters[jj, :]
xx_1d = xx_1d*scale+loc
# Evaluate 1D integrals
tau_ii, P_ii = integrate_tau_P(xx_1d, ww_1d, xtr, lscale[ii])
# tau *= tau_ii
# P *= P_ii
u_ii, lamda_ii, Pi_ii, nu_ii = integrate_u_lamda_Pi_nu(
xx_1d, ww_1d, xtr, lscale[ii])
# u *= u_ii
# lamda *= lamda_ii
# Pi *= Pi_ii
# nu *= nu_ii
if skip_xi_1 is False:
xi_1_ii = integrate_xi_1(xx_1d, ww_1d, lscale[ii])
else:
xi_1_ii = None
# xi_1 *= xi_1_ii
tau_list.append(tau_ii)
P_list.append(P_ii)
u_list.append(u_ii)
lamda_list.append(lamda_ii)
Pi_list.append(Pi_ii)
nu_list.append(nu_ii)
xi_1_list.append(xi_1_ii)
return tau_list, P_list, u_list, lamda_list, Pi_list, nu_list, xi_1_list
def integrate_gaussian_process_squared_exponential_kernel(
X_train,
Y_train,
K_inv,
length_scale,
kernel_var,
variable,
return_full=False,
transform_quad_rules=False,
nquad_samples=50,
y_train_mean=0):
r"""
    Compute

    .. math:: I = \int \eta(\rv) \rho(\rv)\;d\rv

    and

    .. math:: \Sigma = I_2 - I^2, \qquad I_2 = \int \eta^2(\rv) \rho(\rv)\;d\rv

    where :math:`\rho(\rv)` is the joint density of independent random
    variables and :math:`\eta(\rv)` is a Gaussian process (GP)
    constructed with the squared exponential kernel

    .. math:: K(x,y;L)=\sigma_K^2 \exp\left(-\frac{\lVert x-y\rVert_2^2}{2L^2}\right)

    with :math:`L` being a np.ndarray of shape (nvars) containing the
    length scales of the covariance kernel.
Because the GP is a random process, the expectation :math:`I` and the
variance :math:`\Sigma` of the GP with respect to :math:`\rv` are
themselves random variables. Specifically the expectation is a Gaussian
random variable with mean :math:`\mu` and variance :math:`v^2`. The
distribution of :math:`\Sigma` is harder to compute, but we can compute
its mean and variance
Parameters
----------
X_train : np.ndarray (nvars,nsamples)
The locations of the training data used to train the GP
    Y_train : np.ndarray (nsamples, nqoi)
The data values at ``X_train`` used to train the GP
K_inv : np.ndarray (nsamples,nsamples)
The inverse of the covariance matrix
:math:`K(X_train,X_train;length_scale)`
length_scale : np.ndarray (nvars)
The length scales :math:`L`
kernel_var : float
The variance :math:`\sigma_K^2` of the kernel :math:`K`
variable : :class:`pyapprox.variable.IndependentMultivariateRandomVariable`
A set of independent univariate random variables. The tensor-product
of the 1D PDFs yields the joint density :math:`\rho`
return_full : boolean
If true return intermediate quantities used to compute statistics.
This is only necessary for testing
Returns
-------
expected_random_mean : float
The mean :math:`\mu_I` of the Gaussian random variable representing the
expectation :math:`I`
variance_random_mean : float
The variance :math:`v_I^2` of the Gaussian random variable representing
the expectation :math:`I`
expected_random_var : float
The mean :math:`\mu_\Sigma` of the Gaussian random variable
representing the variance :math:`\Sigma`
variance_random_var : float
The variance :math:`v_\Sigma^2` of the Gaussian random variable
representing the variance :math:`\Sigma`
"""
tau_list, P_list, u_list, lamda_list, Pi_list, nu_list, xi_1_list = \
get_gaussian_process_squared_exponential_kernel_1d_integrals(
X_train, length_scale, variable, transform_quad_rules,
nquad_samples)
tau = np.prod(np.array(tau_list), axis=0)
P = np.prod(np.array(P_list), axis=0)
u = np.prod(u_list)
lamda = np.prod(np.array(lamda_list), axis=0)
Pi = np.prod(np.array(Pi_list), axis=0)
nu = np.prod(nu_list)
xi_1 = np.prod(xi_1_list)
# K_inv is inv(kernel_var*A). Thus multiply by kernel_var to get
# Haylock formula
A_inv = K_inv*kernel_var
    # No kernel_var factor is needed here because it cancels out: it appears
    # in K (1/s^2) and in t (s^2)
A_inv_y = A_inv.dot(Y_train)
expected_random_mean = tau.dot(A_inv_y)
expected_random_mean += y_train_mean
varpi = compute_varpi(tau, A_inv)
varsigma_sq = compute_varsigma_sq(u, varpi)
variance_random_mean = variance_of_mean(kernel_var, varsigma_sq)
A_inv_P = A_inv.dot(P)
A_inv_tau = A_inv.dot(tau)
v_sq = compute_v_sq(A_inv, P)
# zeta = compute_zeta(Y_train, A_inv, P)
zeta = compute_zeta_econ(Y_train, A_inv_y, A_inv_P)
zeta += 2*tau.dot(A_inv_y)*y_train_mean+y_train_mean**2
expected_random_var = mean_of_variance(
zeta, v_sq, kernel_var, expected_random_mean, variance_random_mean)
# varphi = compute_varphi(A_inv, P)
varphi = compute_varphi_econ(A_inv_P)
psi = compute_psi(A_inv, Pi)
chi = compute_chi(nu, varphi, psi)
eta = expected_random_mean
# varrho = compute_varrho(lamda, A_inv, Y_train, P, tau)
varrho = compute_varrho_econ(lamda, A_inv_y, A_inv_P, tau)
# phi = compute_phi(Y_train, A_inv, Pi, P)
phi = compute_phi_econ(A_inv_y, A_inv_P, Pi, P)
# adjust phi with unadjusted varrho
phi += 2*y_train_mean*varrho+y_train_mean**2*varsigma_sq
# now adjust varrho
varrho += y_train_mean*varsigma_sq
# xi = compute_xi(xi_1, lamda, tau, P, A_inv)
xi = compute_xi_econ(xi_1, lamda, tau, A_inv_P, A_inv_tau)
term1 = compute_var_of_var_term1(phi, kernel_var, chi, zeta, v_sq)
term2 = compute_var_of_var_term2(
eta, varrho, kernel_var, xi, zeta, v_sq, varsigma_sq)
term3 = compute_var_of_var_term3(varsigma_sq, kernel_var, eta, v_sq)
variance_random_var = term1-2*term2+term3
variance_random_var -= expected_random_var**2
if not return_full:
return expected_random_mean, variance_random_mean, \
expected_random_var, variance_random_var
    intermediate_quantities = tau, u, varpi, varsigma_sq, P, v_sq, zeta, nu, \
        varphi, Pi, psi, chi, phi, lamda, varrho, xi_1, xi
    return expected_random_mean, variance_random_mean, expected_random_var,\
        variance_random_var, intermediate_quantities
def generate_gp_candidate_samples(nvars, num_candidate_samples,
generate_random_samples, variables):
if generate_random_samples is not None:
num_halton_candidates = num_candidate_samples//2
num_random_candidates = num_candidate_samples//2
else:
num_halton_candidates = num_candidate_samples
num_random_candidates = 0
if variables is None:
marginal_icdfs = None
else:
# marginal_icdfs = [v.ppf for v in self.variables]
from scipy import stats
marginal_icdfs = []
# spread QMC samples over entire domain. Range of variable
# is used but not its PDF
for v in variables.all_variables():
lb, ub = v.interval(1)
if not np.isfinite(lb) or not np.isfinite(ub):
lb, ub = v.interval(1-1e-6)
marginal_icdfs.append(stats.uniform(lb, ub-lb).ppf)
candidate_samples = transformed_halton_sequence(
marginal_icdfs, nvars, num_halton_candidates)
if num_random_candidates > 0:
candidate_samples = np.hstack((
candidate_samples, generate_random_samples(num_random_candidates)))
return candidate_samples
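# Hedged sketch: with variables=None and generate_random_samples=None the
# candidate set is a Halton sequence (assumed here to lie on the canonical
# [0, 1]^nvars domain); supplying generate_random_samples instead splits the
# budget evenly between Halton and user-generated random candidates.
#
#     candidate_samples = generate_gp_candidate_samples(2, 100, None, None)
#     # candidate_samples.shape == (2, 100)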
class CholeskySampler(object):
"""
Compute samples for kernel based approximation using the power-function
method.
Parameters
----------
num_vars : integer
The number of variables
num_candidate_samples : integer
The number of candidate samples from which final samples are chosen
variable : :class:`pyapprox.variable.IndependentMultivariateRandomVariable`
A set of independent univariate random variables. The tensor-product
of the 1D PDFs yields the joint density :math:`\rho`
max_num_samples : integer
The maximum number of samples to be generated
weight_function : callable
Function used to precondition kernel with the signature
``weight_function(samples) -> np.ndarray (num_samples)``
where samples is a np.ndarray (num_vars,num_samples)
generate_random_samples : callable
Function with signature
``generate_random_samples(nsamples) -> np.ndarray (nvars, nsamples)``
used to generate samples to enrich default candidate set.
If this is not None then num_candidate_samples//2 will be created
by this function and the other half of samples will be from a Halton
sequence.
init_pivots : np.ndarray (ninit_pivots)
The array indices of the candidate_samples to keep
econ : boolean
        True - pivot based upon the diagonal of the Schur complement
        False - pivot to minimize the trace norm of the low-rank approximation
"""
def __init__(self, num_vars, num_candidate_samples, variables=None,
generate_random_samples=None, init_pivots=None,
nugget=0, econ=True, gen_candidate_samples=None,
var_trans=None):
self.nvars = num_vars
self.kernel_theta = None
self.chol_flag = None
self.variables = variables
self.generate_random_samples = generate_random_samples
if gen_candidate_samples is None:
gen_candidate_samples = partial(
generate_gp_candidate_samples, self.nvars,
generate_random_samples=self.generate_random_samples,
variables=self.variables)
self.var_trans = var_trans
self.set_candidate_samples(
gen_candidate_samples(num_candidate_samples))
self.set_weight_function(None)
self.ntraining_samples = 0
self.set_init_pivots(init_pivots)
self.nugget = nugget
self.econ = econ
def set_candidate_samples(self, candidate_samples):
if self.var_trans is not None:
self.candidate_samples = self.var_trans.map_to_canonical_space(
candidate_samples)
else:
self.candidate_samples = candidate_samples
def add_nugget(self):
self.Kmatrix[np.arange(self.Kmatrix.shape[0]),
np.arange(self.Kmatrix.shape[1])] += self.nugget
def set_weight_function(self, weight_function):
self.pivot_weights = None
if self.var_trans is None or weight_function is None:
self.weight_function = weight_function
else:
# weight function is applied in canonical_space
def wt_function(x):
return weight_function(
self.var_trans.map_from_canonical_space(x))
self.weight_function = wt_function
if self.weight_function is not None:
self.pivot_weights = self.weight_function(self.candidate_samples)
self.weight_function_changed = True
def set_kernel(self, kernel):
if not hasattr(self, 'kernel') or self.kernel != kernel:
self.kernel_changed = True
self.kernel = kernel
self.kernel_theta = self.kernel.theta
def set_init_pivots(self, init_pivots):
self.init_pivots = init_pivots
self.training_samples = \
self.candidate_samples[:, :self.ntraining_samples]
self.init_pivots_changed = True
def __call__(self, num_samples):
if not hasattr(self, 'kernel'):
raise Exception('Must call set_kernel')
if not hasattr(self, 'weight_function'):
raise Exception('Must call set_weight_function')
if num_samples < self.training_samples.shape[1]:
msg = f'Requesting number of samples {num_samples} which is less '
msg += 'than number of training samples already generated '
msg += f'{self.training_samples.shape[1]}'
raise Exception(msg)
if self.kernel_theta is None:
assert self.kernel_changed
nprev_train_samples = self.ntraining_samples
if (self.weight_function_changed or self.kernel_changed or
self.init_pivots_changed):
self.Kmatrix = self.kernel(self.candidate_samples.T)
if self.econ is False and self.pivot_weights is not None:
weights = np.sqrt(self.pivot_weights)
# assert np.allclose(np.diag(weights).dot(self.Kmatrix.dot(
# np.diag(weights))),
# weights[:, np.newaxis]*self.Kmatrix*weights)
self.Kmatrix = weights[:, np.newaxis]*self.Kmatrix*weights
self.pivot_weights = None
if self.nugget > 0:
self.add_nugget()
self.L, self.pivots, error, self.chol_flag, self.diag, \
self.init_error, self.ntraining_samples = \
pivoted_cholesky_decomposition(
self.Kmatrix, num_samples, init_pivots=self.init_pivots,
pivot_weights=self.pivot_weights,
error_on_small_tol=False, return_full=True, econ=self.econ)
self.weight_function_changed = False
self.kernel_changed = False
else:
self.L, self.pivots, self.diag, self.chol_flag, \
self.ntraining_samples, error = \
continue_pivoted_cholesky_decomposition(
self.Kmatrix, self.L, num_samples, self.init_pivots,
0., False, self.pivot_weights, self.pivots, self.diag,
self.ntraining_samples, self.init_error, econ=self.econ)
if self.chol_flag == 0:
assert self.ntraining_samples == num_samples
self.init_pivots = self.pivots[:self.ntraining_samples].copy()
# extract samples that were not already in sample set
# pivots has already been reduced to have the size of the number of
# samples requested
new_samples = \
self.candidate_samples[:, self.pivots[
nprev_train_samples:self.ntraining_samples]]
self.training_samples = np.hstack(
[self.training_samples, new_samples])
if self.var_trans is None:
return new_samples, self.chol_flag
return self.var_trans.map_from_canonical_space(
new_samples), self.chol_flag
class AdaptiveCholeskyGaussianProcessFixedKernel(object):
"""
    Efficient implementation for the case when the Gaussian process kernel
    has no tunable hyper-parameters. The Cholesky factor computed to generate
    the training samples is reused for fitting
"""
def __init__(self, sampler, func):
self.sampler = sampler
self.func = func
self.chol_flag = 0
def refine(self, num_samples):
if self.chol_flag > 0:
msg = 'Cannot refine. No well conditioned candidate samples '
msg += 'remaining'
print(msg)
return
new_samples, self.chol_flag = self.sampler(num_samples)
new_values = self.func(new_samples)
assert new_values.shape[0] == new_samples.shape[1]
if hasattr(self, 'train_samples'):
self.train_samples = np.hstack([self.train_samples, new_samples])
self.train_values = np.vstack([self.train_values, new_values])
else:
self.train_samples, self.train_values = new_samples, new_values
self.fit()
def get_current_chol_factor(self):
nn = self.sampler.ntraining_samples
if type(self.sampler) == CholeskySampler:
chol_factor = self.sampler.L[self.sampler.pivots[:nn], :nn]
elif type(self.sampler) == GreedyIntegratedVarianceSampler:
chol_factor = self.sampler.L[:nn, :nn]
else:
raise Exception()
return chol_factor
def fit(self):
chol_factor = self.get_current_chol_factor()
self.coef = cholesky_solve_linear_system(
chol_factor, self.train_values)
def __call__(self, samples):
return self.sampler.kernel(samples.T, self.train_samples.T).dot(
self.coef)
def num_training_samples(self):
return self.train_samples.shape[1]
def condition_number(self):
chol_factor = self.get_current_chol_factor()
return np.linalg.cond(chol_factor.dot(chol_factor.T))
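# Illustrative sketch (not part of the original module): pairing
# CholeskySampler with AdaptiveCholeskyGaussianProcessFixedKernel for a fixed
# RBF kernel. The candidate domain is assumed to be the canonical [0, 1]^2
# hypercube obtained when no variables are supplied, and the target function
# is a toy quantity of interest used only for demonstration.
def _example_adaptive_cholesky_gp():
    from sklearn.gaussian_process.kernels import RBF

    def func(samples):
        # values must have shape (nsamples, nqoi)
        return np.sum(samples**2, axis=0)[:, np.newaxis]

    sampler = CholeskySampler(2, 200)
    sampler.set_kernel(RBF(0.5))
    approx = AdaptiveCholeskyGaussianProcessFixedKernel(sampler, func)
    approx.refine(10)   # select 10 training samples with pivoted Cholesky
    approx.refine(20)   # the factorization is continued, not recomputed
    test_samples = np.random.uniform(0., 1., (2, 5))
    return approx(test_samples)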
def gaussian_process_pointwise_variance(kernel, pred_samples, train_samples,
nugget=0):
r"""
    Compute the pointwise variance of a Gaussian process, that is

    .. math::
       K(\hat{x}, \hat{x}) - K(\hat{x}, X) K(X, X)^{-1} K(X, \hat{x})

    for each sample :math:`\hat{x}=[\hat{x}_1,\ldots,\hat{x}_d]` and a set of
    training samples :math:`X=[x^{(1)},\ldots,x^{(N)}]`
Parameters
----------
kernel : callable
Function with signature
``K(X, Y) -> np.ndarray(X.shape[0], Y.shape[0])``
where X and Y are samples with shape (nsamples_X, nvars) and
(nsamples_Y, nvars). Note this function accepts sample sets stored in
the transpose of the typical pyapprox format
train_samples : np.ndarray (nvars, ntrain_samples)
The locations of the training data used to train the GP
pred_samples : np.ndarray (nvars, npred_samples)
        The samples at which to evaluate the pointwise variance of the GP
Returns
-------
variance : np.ndarray (npred_samples)
The pointwise variance at each prediction sample
"""
K_train = kernel(train_samples.T)
# add small number to diagonal to ensure covariance matrix is
# positive definite
ntrain_samples = train_samples.shape[1]
K_train[np.arange(ntrain_samples), np.arange(ntrain_samples)] += nugget
k_pred = kernel(train_samples.T, pred_samples.T)
L = np.linalg.cholesky(K_train)
tmp = solve_triangular(L, k_pred, lower=True)
variance = kernel.diag(pred_samples.T) - np.sum(tmp*tmp, axis=0)
return variance
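# Illustrative sketch (not part of the original module): posterior pointwise
# variance of an RBF kernel at random prediction points. The sample sets and
# length scale are assumptions chosen only for demonstration.
def _example_pointwise_variance():
    from sklearn.gaussian_process.kernels import RBF
    kernel = RBF(0.5)
    train_samples = np.random.uniform(0., 1., (2, 20))
    pred_samples = np.random.uniform(0., 1., (2, 100))
    variance = gaussian_process_pointwise_variance(
        kernel, pred_samples, train_samples, nugget=1e-8)
    # the posterior variance is bounded above by the prior variance
    # kernel.diag(pred_samples.T), which is one for an RBF kernel
    assert variance.max() <= 1 + 1e-8
    return variance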
def RBF_gradient_wrt_samples(query_sample, other_samples, length_scale):
r"""
Gradient of the squared exponential kernel
.. math::
\frac{\partial}{\partial x}K(x, Y) = -K(x, Y)^T \circ D\Lambda^{-1}
Here :math:`x=[x_1,\ldots,x_d]^T` is a sample,
:math:`Y=[y^{(1)},\ldots,y^{(N)}]`
is a set of samples and the kernel is given by
.. math::
K(x, y^{(i)}) =
\exp\left(-\frac{1}{2}(x-y^{(i)})^T\Lambda^{-1}(x-y^{(i)})\right)
where
:math:`\Lambda^{-1}=\mathrm{diag}([l_1^2,\ldots,l_d^2])`,
:math:`D=[\tilde{x}-\tilde{y}^{(1)},\ldots,\tilde{x}-\tilde{y}^{(N)}]` and
.. math::
\tilde{x} = \left[\frac{x_1}{l_1^2}, \ldots, \frac{x_d}{l_d^2}\right],
\qquad \tilde{y}^{(i)} =
\left[\frac{y_1^{(i)}}{l_1^2},\ldots, \frac{y_d^{(i)}}{l_d^2}\right]
Parameters
----------
query_sample : np.ndarray (nvars, 1)
The sample :math:`x`
other_samples : np.ndarray (nvars, nother_samples)
The samples :math:`y`
length_scale : np.ndarray (nvars)
The length scales `l` in each dimension
Returns
-------
grad : np.ndarray (nother_samples, nvars)
The gradient of the kernel
"""
dists = cdist(query_sample.T/length_scale, other_samples.T/length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
grad = -K.T*(
np.tile(query_sample.T, (other_samples.shape[1], 1))-other_samples.T)/(
np.asarray(length_scale)**2)
return grad
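# Illustrative sketch (not part of the original module): finite-difference
# check of RBF_gradient_wrt_samples for a single query sample. The samples
# and length scales are assumptions used only for demonstration.
def _example_check_rbf_gradient(eps=1e-7):
    length_scale = np.array([0.5, 1.0])
    query_sample = np.random.uniform(0., 1., (2, 1))
    other_samples = np.random.uniform(0., 1., (2, 10))
    grad = RBF_gradient_wrt_samples(query_sample, other_samples, length_scale)
    dist_func = partial(cdist, metric='sqeuclidean')

    def kernel_row(x):
        # squared exponential kernel evaluated between x and other_samples
        return np.exp(-.5*dist_func(x.T/length_scale,
                                    other_samples.T/length_scale))[0, :]

    for dd in range(2):
        perturbed = query_sample.copy()
        perturbed[dd, 0] += eps
        fd_grad = (kernel_row(perturbed)-kernel_row(query_sample))/eps
        assert np.allclose(grad[:, dd], fd_grad, atol=1e-5)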
def RBF_integrated_posterior_variance_gradient_wrt_samples(
train_samples, quad_x, quad_w,
kernel, new_samples_index=0, nugget=0):
r"""
"""
nvars, ntrain_samples = train_samples.shape
length_scale = kernel.length_scale
if np.isscalar(length_scale):
length_scale = np.array([length_scale]*nvars)
K_train = kernel(train_samples.T)
# add small number to diagonal to ensure covariance matrix is
# positive definite
ntrain_samples = train_samples.shape[1]
K_train[np.arange(ntrain_samples), np.arange(ntrain_samples)] += nugget
A_inv = np.linalg.inv(K_train)
grad_P, P = integrate_grad_P(
quad_x, quad_w, train_samples, length_scale)
AinvPAinv = (A_inv.dot(P).dot(A_inv))
noptimized_train_samples = ntrain_samples-new_samples_index
jac = np.zeros((nvars*noptimized_train_samples))
cnt = 0
for kk in range(new_samples_index, ntrain_samples):
K_train_grad_all_train_points_kk = \
RBF_gradient_wrt_samples(
train_samples[:, kk:kk+1], train_samples, length_scale)
        # Use the following properties for tmp3 and tmp4.
        # The sum of the element-wise product of a symmetric matrix D with a
        # sparse symmetric matrix whose only nonzero row/column is kk:
        # 0  a  0    D00 D01 D02
        # a  b  c  x D10 D11 D12
        # 0  c  0    D20 D21 D22
        # = 2*(a*D01 + b*D11 + c*D21) - b*D11
        #
        # Trace[RCRP] = Trace[RPRC] for symmetric matrices
tmp3 = -2*np.sum(K_train_grad_all_train_points_kk.T*AinvPAinv[:, kk],
axis=1)
tmp3 -= -K_train_grad_all_train_points_kk[kk, :]*AinvPAinv[kk, kk]
jac[cnt*nvars:(cnt+1)*nvars] = -tmp3
tmp4 = 2*np.sum(grad_P[kk*nvars:(kk+1)*nvars]*A_inv[:, kk], axis=1)
tmp4 -= grad_P[kk*nvars:(kk+1)*nvars, kk]*A_inv[kk, kk]
jac[cnt*nvars:(cnt+1)*nvars] -= tmp4
cnt += 1
return jac
def RBF_posterior_variance_jacobian_wrt_samples(
train_samples, pred_samples,
kernel, new_samples_index=0, nugget=0):
r"""
Gradient of the posterior covariance of a Gaussian process built
using the squared exponential kernel. Let :math:`\hat{x}^{(i)}` be a
prediction sample and :math:`x=[x^{(1)}, \ldots, x^{(N)}]` be the
training samples then the posterior covariance is
.. math::
c(\hat{x}^{(i)}, x)=c(\hat{x}^{(i)}, \hat{x}^{(i)}) -
K(\hat{x}^{(i)}, x)R K(\hat{x}^{(i)}, x)^T
and
.. math::
       \frac{\partial c(\hat{x}^{(i)}, x)}{\partial x_l}=
       2\left(\frac{\partial}{\partial x_l}K(\hat{x}^{(i)}, x_l)\right)
       \sum_{k=1}^N
       R[l,k]K(\hat{x}^{(i)}, x_k) - \sum_{j=1}^N\sum_{k=1}^N K(\hat{x}^{(i)},
       x_j)\frac{\partial}{\partial x_l}\left(R[j,k]\right)K(\hat{x}^{(i)}, x_k)

    where :math:`R = K(x, x)^{-1}` and

    .. math::
       \frac{\partial R^{-1}}{\partial x_l} = -R^{-1}
       \frac{\partial R}{\partial x_l} R^{-1}
Parameters
----------
train_samples : np.ndarray (nvars, ntrain_samples)
The locations of the training data used to train the GP
pred_samples : np.ndarray (nvars, npred_samples)
        The prediction samples at which the posterior variance is evaluated
kernel : callable
Function with signature
``K(X, Y) -> np.ndarray(X.shape[0], Y.shape[0])``
where X and Y are samples with shape (nsamples_X, nvars) and
(nsamples_Y, nvars). Note this function accepts sample sets stored in
the transpose of the typical pyapprox format
    new_samples_index : integer
        Index into train_samples indicating the training samples for which
        derivatives will be computed, i.e. the derivatives with respect to
        the coordinates of train_samples[:, new_samples_index:]
Returns
-------
    jac : np.ndarray (npred_samples, (ntrain_samples-new_samples_index)*nvars)
"""
length_scale = kernel.length_scale
nvars, npred_samples = pred_samples.shape
ntrain_samples = train_samples.shape[1]
noptimized_train_samples = ntrain_samples-new_samples_index
k_pred_grad_all_train_points = np.zeros(
(noptimized_train_samples, npred_samples, nvars))
ii = 0
for jj in range(new_samples_index, ntrain_samples):
k_pred_grad_all_train_points[ii, :, :] = \
RBF_gradient_wrt_samples(
train_samples[:, jj:jj+1], pred_samples, length_scale)
ii += 1
K_train = kernel(train_samples.T)
# add small number to diagonal to ensure covariance matrix is
# positive definite
ntrain_samples = train_samples.shape[1]
K_train[np.arange(ntrain_samples), np.arange(ntrain_samples)] += nugget
K_inv = np.linalg.inv(K_train)
k_pred = kernel(train_samples.T, pred_samples.T)
jac = np.zeros((npred_samples, nvars*noptimized_train_samples))
tau = k_pred.T.dot(K_inv)
# K_train_grad = np.zeros((ntrain_samples, ntrain_samples))
ii = 0
for jj in range(new_samples_index, ntrain_samples):
K_train_grad_all_train_points_jj = \
RBF_gradient_wrt_samples(
train_samples[:, jj:jj+1], train_samples, length_scale)
jac[:, ii*nvars:(ii+1)*nvars] += \
2*tau[:, jj:jj+1]*k_pred_grad_all_train_points[ii, :, :]
tmp1 = K_train_grad_all_train_points_jj.T[:, np.newaxis, :] *\
np.tile(tau[:, jj:jj+1], (2, 1, ntrain_samples))
tmp1[:, :, jj] = K_train_grad_all_train_points_jj.T.dot(tau.T)
tmp2 = np.sum(tau*tmp1, axis=(2))
jac[:, ii*nvars:(ii+1)*nvars] -= tmp2.T # check if -= is needed over =
# leave the following for loop to show how sparsity is taken advantage
# of above. Above is abstract and hard to see what is being done
# for kk in range(nvars):
# # K_train_grad[jj, :] = K_train_grad_all_train_points_jj[:, kk]
# # K_train_grad[:, jj] = K_train_grad[jj, :]
# # The following takes advantage of sparsity of
# # tmp = tau.dot(K_train_grad)
# # Reset to zero
# # K_train_grad[jj, :] = 0
# # K_train_grad[:, jj] = 0
# tmp = K_train_grad_all_train_points_jj[:, kk:kk+1].T *\
# np.tile(tau[:, jj:jj+1], (1, ntrain_samples))
# tmp[:, jj] = tau.dot(K_train_grad_all_train_points_jj[:, kk])
# assert np.allclose(tmp[:,jj], tmp1[kk,:,jj])
# assert np.allclose(tmp,tmp1[kk,:,:])
# jac[:, ii*nvars+kk] -= np.sum(tmp*tau, axis=1)
ii += 1
jac *= -1
return jac
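# Illustrative sketch (not part of the original module): finite-difference
# check of the posterior-variance Jacobian against
# gaussian_process_pointwise_variance. The jac column ordering ii*nvars+kk
# (training sample ii, variable kk) follows the function above; the kernel
# and sample sets are assumptions used only for demonstration.
def _example_check_posterior_variance_jacobian(eps=1e-7):
    from sklearn.gaussian_process.kernels import RBF
    kernel = RBF(0.5)
    nvars, ntrain_samples = 2, 6
    train_samples = np.random.uniform(0., 1., (nvars, ntrain_samples))
    pred_samples = np.random.uniform(0., 1., (nvars, 4))
    jac = RBF_posterior_variance_jacobian_wrt_samples(
        train_samples, pred_samples, kernel)
    max_error = 0.0
    for ii in range(ntrain_samples):
        for kk in range(nvars):
            perturbed = train_samples.copy()
            perturbed[kk, ii] += eps
            fd = (gaussian_process_pointwise_variance(
                kernel, pred_samples, perturbed) -
                gaussian_process_pointwise_variance(
                    kernel, pred_samples, train_samples))/eps
            max_error = max(
                max_error, np.absolute(jac[:, ii*nvars+kk]-fd).max())
    # if the analytic Jacobian is correct, max_error should be of the order
    # of the finite-difference truncation error
    return max_error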
def gaussian_grad_P_diag_term1(xtr_ii, lscale, mu, sigma):
m, s, l, a = mu, sigma, lscale, xtr_ii
term1 = (np.exp(-((a-m)**2/(l**2+2*s**2)))*l*(-a+m))/(l**2+2*s**2)**(3/2)
return term1
def gaussian_grad_P_diag_term2(xtr_ii, lscale, mu, sigma):
n, p, q, b = mu, sigma, lscale, xtr_ii
term2 = np.exp(-((b-n)**2/(2*p**2+q**2)))/(p*np.sqrt(1/p**2+2/q**2))
return term2
def gaussian_grad_P_offdiag_term1(xtr_ii, xtr_jj, lscale, mu, sigma):
m, s, l, a, c = mu, sigma, lscale, xtr_ii, xtr_jj
term1 = (
np.exp(-((-2*c*l**2*m+2*l**2*m**2+a**2*(l**2+s**2)+c**2*(l**2+s**2) -
2*a*(l**2*m+c*s**2))/(
2*l**2*(l**2+2*s**2))))*(l**2*m+c*s**2-a*(l**2+s**2)))/(
l*(l**2+2*s**2)**(3/2))
return term1
def gaussian_grad_P_offdiag_term2(xtr_ii, xtr_jj, lscale, mu, sigma):
b, d, q, n, p = xtr_ii, xtr_jj, lscale, mu, sigma
term2 = np.exp(-((-2*d*n*q**2+2*n**2*q**2+b**2*(p**2+q**2)+d **
2*(p**2+q**2)-2*b*(d*p**2+n*q**2))/(
2*q**2*(2*p**2+q**2))))
term2 /= p*np.sqrt(1/p**2+2/q**2)
return term2
def integrate_grad_P(xx, ww, xtr, lscale):
nvars = len(lscale)
assert len(xx) == len(ww) == nvars
assert xtr.shape[0] == nvars
dist_func = partial(cdist, metric='sqeuclidean')
ntrain_samples = xtr.shape[1]
grad_P = np.empty((nvars*ntrain_samples, ntrain_samples))
K = [] # keep K as list to allow for different size quadrature rules
diffs = [] # similarly for diffs
P = np.empty((nvars, ntrain_samples, ntrain_samples))
for nn in range(nvars):
xx_1d, ww_1d = xx[nn], ww[nn]
lscale_nn = lscale[nn]
dists_1d_x1_xtr = dist_func(
xx_1d[:, np.newaxis]/lscale_nn, xtr[nn:nn+1, :].T/lscale_nn)
K.append(np.exp(-.5*dists_1d_x1_xtr))
P[nn] = K[-1].T.dot(ww_1d[:, np.newaxis]*K[-1])
diffs.append(-(xtr[nn:nn+1, :].T-xx_1d)/lscale_nn**2)
# TODO replace loop over train samples with numpy operations
for ii in range(ntrain_samples):
for nn in range(nvars):
diff = diffs[nn][ii]
grad_P[nvars*ii+nn, :] = ww_1d.dot(
(diff*K[nn][:, ii])[:, np.newaxis]*K[nn])
grad_P[nvars*ii+nn, :] *= np.prod(P[:nn, ii, :], axis=0)
grad_P[nvars*ii+nn, :] *= np.prod(P[nn+1:, ii, :], axis=0)
grad_P[nvars*ii+nn, ii] *= 2
return grad_P, np.prod(P, axis=0)
class IVARSampler(object):
"""
Parameters
----------
num_vars : integer
The number of dimensions
nquad_samples : integer
The number of samples used to compute the sample based estimate
of the integrated variance (IVAR). If use_quadrature is True
then this should be 100-1000. Otherwise this value should be at
least 10,000.
ncandidate_samples : integer
The number of samples used by the greedy downselection procedure
used to determine the initial guess (set of points) for the gradient
based optimization
generate_random_samples : callable
Function with signature
``generate_random_samples(nsamples) -> np.ndarray (nvars, nsamples)``
used to generate samples needed to compute IVAR using Monte Carlo
quadrature. Note even if use_gauss_quadrature is True, this function
will be used (if provided) to enrich the default candidate set of the
greedy method used to compute the initial guess for the gradient based
optimization.
If this is not None then num_candidate_samples//2 will be created
by this function and the other half of samples will be from a Halton
sequence.
variables : :class:`pyapprox.variable.IndependentMultivariateRandomVariable`
A set of independent univariate random variables. The tensor-product
of the 1D PDFs yields the joint density :math:`\rho`. The bounds and
CDFs of these variables are used to transform the Halton sequence used
as the candidate set for the greedy generation of the initial guess.
greedy_method : string
Name of the greedy strategy for computing the initial guess used
for the gradient based optimization
use_gauss_quadrature : boolean
True - Assume the kernel is the tensor product of univariate kernels
and compute integrated variance by computing a set of univariate
integrals with Gaussian quadrature
False - Use monte carlo quadrature to estimate integrated variance.
Any kernel can be used.
nugget : float
A small value added to the diagonal of the kernel matrix to improve
conditioning.
"""
def __init__(self, num_vars, nquad_samples,
ncandidate_samples, generate_random_samples, variables=None,
greedy_method='ivar', use_gauss_quadrature=False,
nugget=0):
self.nvars = num_vars
self.nquad_samples = nquad_samples
self.greedy_method = greedy_method
self.use_gauss_quadrature = use_gauss_quadrature
self.pred_samples = generate_random_samples(self.nquad_samples)
self.ncandidate_samples = ncandidate_samples
self.variables = variables
self.generate_random_samples = generate_random_samples
self.nugget = nugget
self.ntraining_samples = 0
self.training_samples = np.empty((num_vars, self.ntraining_samples))
self.nsamples_requested = []
self.set_optimization_options(
{'gtol': 1e-8, 'ftol': 0, 'disp': False, 'iprint': 0})
self.initialize_greedy_sampler()
if use_gauss_quadrature:
self.precompute_gauss_quadrature()
self.objective = self.quadrature_objective
self.objective_gradient = self.quadrature_objective_gradient
assert self.greedy_sampler.variables is not None
else:
self.objective = self.monte_carlo_objective
self.objective_gradient = self.monte_carlo_objective_gradient
def initialize_greedy_sampler(self):
if self.greedy_method == 'chol':
self.greedy_sampler = CholeskySampler(
self.nvars, self.ncandidate_samples, self.variables,
generate_random_samples=self.generate_random_samples)
elif self.greedy_method == 'ivar':
self.greedy_sampler = GreedyIntegratedVarianceSampler(
self.nvars, self.nquad_samples, self.ncandidate_samples,
self.generate_random_samples, self.variables,
use_gauss_quadrature=self.use_gauss_quadrature, econ=True,
nugget=self.nugget)
else:
msg = f'Incorrect greedy_method {self.greedy_method}'
raise Exception(msg)
def precompute_gauss_quadrature(self):
degrees = [min(100, self.nquad_samples)]*self.nvars
self.univariate_quad_rules, self.pce = \
get_univariate_quadrature_rules_from_variable(
self.greedy_sampler.variables, degrees)
self.quad_rules = []
for ii in range(self.nvars):
xx_1d, ww_1d = self.univariate_quad_rules[ii](degrees[ii]+1)
jj = self.pce.basis_type_index_map[ii]
loc, scale = self.pce.var_trans.scale_parameters[jj, :]
xx_1d = xx_1d*scale+loc
self.quad_rules.append([xx_1d, ww_1d])
def get_univariate_quadrature_rule(self, ii):
return self.quad_rules[ii]
def compute_P(self, train_samples):
self.degrees = [self.nquad_samples]*self.nvars
length_scale = self.greedy_sampler.kernel.length_scale
if np.isscalar(length_scale):
length_scale = np.array([length_scale]*self.nvars)
P = 1
for ii in range(self.nvars):
xx_1d, ww_1d = self.get_univariate_quadrature_rule(ii)
xtr = train_samples[ii:ii+1, :]
K = self.greedy_sampler.kernels_1d[ii](
xx_1d[np.newaxis, :], xtr, length_scale[ii])
P_ii = K.T.dot(ww_1d[:, np.newaxis]*K)
P *= P_ii
return P
def quadrature_objective(self, new_train_samples_flat):
train_samples = np.hstack(
[self.training_samples,
new_train_samples_flat.reshape(
(self.nvars, new_train_samples_flat.shape[0]//self.nvars),
order='F')])
A = self.greedy_sampler.kernel(train_samples.T)
A[np.arange(A.shape[0]), np.arange(A.shape[1])] += self.nugget
A_inv = np.linalg.inv(A)
P = self.compute_P(train_samples)
return 1-np.trace(A_inv.dot(P))
def quadrature_objective_gradient(self, new_train_samples_flat):
train_samples = np.hstack(
[self.training_samples,
new_train_samples_flat.reshape(
(self.nvars, new_train_samples_flat.shape[0]//self.nvars),
order='F')])
xx = [q[0] for q in self.quad_rules]
ww = [q[1] for q in self.quad_rules]
new_samples_index = self.training_samples.shape[1]
return RBF_integrated_posterior_variance_gradient_wrt_samples(
train_samples, xx, ww, self.greedy_sampler.kernel,
new_samples_index, nugget=self.nugget)
def monte_carlo_objective(self, new_train_samples_flat):
train_samples = np.hstack(
[self.training_samples,
new_train_samples_flat.reshape(
(self.nvars, new_train_samples_flat.shape[0]//self.nvars),
order='F')])
val = gaussian_process_pointwise_variance(
self.greedy_sampler.kernel, self.pred_samples,
train_samples, self.nugget).mean()
# print('f',val)
return val
def monte_carlo_objective_gradient(self, new_train_samples_flat):
train_samples = np.hstack(
[self.training_samples,
new_train_samples_flat.reshape(
(self.nvars, new_train_samples_flat.shape[0]//self.nvars),
order='F')])
new_samples_index = self.training_samples.shape[1]
return RBF_posterior_variance_jacobian_wrt_samples(
train_samples, self.pred_samples, self.greedy_sampler.kernel,
new_samples_index, self.nugget).mean(axis=0)
def set_weight_function(self, weight_function):
self.greedy_sampler.set_weight_function(weight_function)
def set_kernel(self, kernel, kernels_1d=None):
if ((self.use_gauss_quadrature is True) and (self.nvars != 1) and
((type(kernel) != Matern) or (np.isfinite(kernel.nu)))):
            # TODO: To deal with sum kernel with noise, need to amend
# gradient computation which currently assumes no noise
msg = f'GP Kernel type: {type(kernel)} '
msg += 'Only squared exponential kernel supported when '
msg += 'use_gauss_quadrature is True and nvars > 1'
# TODO add other tensor product kernels
raise Exception(msg)
self.greedy_sampler.set_kernel(copy.deepcopy(kernel), kernels_1d)
def set_optimization_options(self, opts):
self.optim_opts = opts.copy()
def set_bounds(self, nsamples):
if self.greedy_sampler.variables is None:
lbs, ubs = np.zeros(self.nvars), np.ones(self.nvars)
else:
variables = self.greedy_sampler.variables.all_variables()
lbs = [v.interval(1)[0] for v in variables]
ubs = [v.interval(1)[1] for v in variables]
lbs = np.repeat(lbs, nsamples)
ubs = np.repeat(ubs, nsamples)
self.bounds = Bounds(lbs, ubs)
def __call__(self, nsamples):
self.nsamples_requested.append(nsamples)
# Remove previous training samples from candidate set to prevent
# adding them twice
candidate_samples = self.greedy_sampler.candidate_samples
if len(self.nsamples_requested) > 1:
candidate_samples = candidate_samples[
:, self.nsamples_requested[-2]:]
# Add previous optimized sample set to candidate samples. This could
# potentially add a candidate twice if the optimization picks some
# of the original candidate samples chosen by
# greedy_sampler.generate_samples, but this is unlikely. If it does
# happen these points will never be chosen by the cholesky algorithm
candidate_samples = np.hstack([
self.training_samples.copy(), candidate_samples])
# make sure greedy sampler recomputes all necessary information
# but first extract necessary information
pred_samples = self.greedy_sampler.pred_samples
if hasattr(self.greedy_sampler, 'weight_function'):
weight_function = self.greedy_sampler.weight_function
else:
weight_function = None
kernel = self.greedy_sampler.kernel
self.initialize_greedy_sampler()
if weight_function is not None:
self.set_weight_function(weight_function)
        # self.greedy_sampler.candidate_samples must be set before
        # set_kernel is called to make sure the self.A matrix is set correctly
        self.greedy_sampler.candidate_samples = candidate_samples
        # currently the following has no effect; a different set
        # of prediction samples will be generated by the greedy sampler when
        # set_kernel is called
self.greedy_sampler.pred_samples = pred_samples
self.set_kernel(kernel)
# Make sure greedy_sampler chooses self.training_samples
        # only used if greedy_sampler is a CholeskySampler.
self.greedy_sampler.set_init_pivots(np.arange(self.ntraining_samples))
# Get the initial guess for new samples to add.
# Note the Greedy sampler will return only new samples not in
# self.training_samples
self.init_guess, chol_flag = self.greedy_sampler(nsamples)
self.init_guess = self.init_guess[:, self.ntraining_samples:]
# assert np.allclose(
# self.greedy_sampler.L[:self.ntraining_samples,
# :self.ntraining_samples],
# np.linalg.cholesky(kernel(self.training_samples.T)))
assert chol_flag == 0
self.set_bounds(nsamples-self.ntraining_samples)
init_guess = self.init_guess.flatten(order='F')
# Optimize the locations of only the new training samples
jac = self.objective_gradient
res = minimize(self.objective, init_guess, jac=jac,
method='L-BFGS-B', options=self.optim_opts,
bounds=self.bounds)
print(res)
new_samples = res.x.reshape(
(self.nvars, res.x.shape[0]//self.nvars), order='F')
self.training_samples = np.hstack([self.training_samples, new_samples])
self.ntraining_samples = self.training_samples.shape[1]
return new_samples, 0
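# Hedged usage sketch (assumptions: the canonical [0, 1]^2 domain obtained
# when variables=None, a plain RBF kernel, and Monte Carlo quadrature). The
# greedy 'ivar' design seeds a gradient-based refinement of the new sample
# locations:
#
#     from sklearn.gaussian_process.kernels import RBF
#
#     def generate_random_samples(nsamples):
#         return np.random.uniform(0., 1., (2, nsamples))
#
#     sampler = IVARSampler(2, 1000, 100, generate_random_samples,
#                           greedy_method='ivar')
#     sampler.set_kernel(RBF(0.5))
#     new_samples, _ = sampler(10)    # optimized locations of 10 samples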
def matern_kernel_1d_inf(dists):
return np.exp(-.5*dists**2)
def matern_kernel_1d_12(dists):
return np.exp(-dists)
def matern_kernel_1d_32(dists):
tmp = np.sqrt(3)*dists
return (1+tmp)*np.exp(-tmp)
def matern_kernel_1d_52(dists):
tmp = np.sqrt(5)*dists
return (1+tmp+tmp**2/3)*np.exp(-tmp)
def matern_kernel_general(nu, dists):
dists[dists == 0] += np.finfo(float).eps
tmp = (np.sqrt(2*nu) * dists)
return tmp**nu*(2**(1.-nu))/gamma(nu)*kv(nu, tmp)
def matern_kernel_1d(nu, x, y, lscale):
explicit_funcs = {0.5: matern_kernel_1d_12, 1.5: matern_kernel_1d_32,
                      2.5: matern_kernel_1d_52, np.inf: matern_kernel_1d_inf}
dist_func = partial(cdist, metric='euclidean')
dists = dist_func(x.T/lscale, y.T/lscale)
if nu in explicit_funcs:
return explicit_funcs[nu](dists)
return matern_kernel_general(nu, dists)
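# Illustrative sketch (not part of the original module): consistency check of
# matern_kernel_1d against the scikit-learn Matern kernel for a 1D variable.
# The sample sets and length scale are assumptions used for demonstration.
def _example_check_matern_kernel_1d():
    from sklearn.gaussian_process.kernels import Matern
    lscale = 0.5
    x = np.random.uniform(0., 1., (1, 5))
    y = np.random.uniform(0., 1., (1, 7))
    for nu in [0.5, 1.5, 2.5]:
        K = matern_kernel_1d(nu, x, y, lscale)
        K_sklearn = Matern(length_scale=lscale, nu=nu)(x.T, y.T)
        assert np.allclose(K, K_sklearn)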
class GreedyVarianceOfMeanSampler(object):
"""
Parameters
----------
num_vars : integer
The number of dimensions
nquad_samples : integer
The number of samples used to compute the sample based estimate
of the variance of mean criteria
ncandidate_samples : integer
The number of samples used by the greedy downselection procedure
"""
def __init__(self, num_vars, nquad_samples,
ncandidate_samples, generate_random_samples, variables=None,
use_gauss_quadrature=False, econ=True,
compute_cond_nums=False, nugget=0):
self.nvars = num_vars
self.nquad_samples = nquad_samples
self.variables = variables
self.ntraining_samples = 0
self.training_samples = np.empty((num_vars, self.ntraining_samples))
self.generate_random_samples = generate_random_samples
self.use_gauss_quadrature = use_gauss_quadrature
self.econ = econ
self.candidate_samples = generate_gp_candidate_samples(
self.nvars, ncandidate_samples, generate_random_samples,
self.variables)
self.nsamples_requested = []
self.pivots = []
self.cond_nums = []
self.compute_cond_nums = compute_cond_nums
self.init_pivots = None
self.nugget = nugget
self.initialize()
self.best_obj_vals = []
self.pred_samples = None
def initialize(self):
self.L = np.zeros((0, 0))
if self.econ is True:
self.y_1 = np.zeros((0))
self.candidate_y_2 = np.empty(self.candidate_samples.shape[1])
# def monte_carlo_objective(self, new_sample_index):
# train_samples = np.hstack(
# [self.training_samples,
# self.candidate_samples[:, new_sample_index:new_sample_index+1]])
# return gaussian_process_pointwise_variance(
# self.kernel, self.pred_samples,
# train_samples).mean()
def precompute_monte_carlo(self):
self.pred_samples = self.generate_random_samples(
self.nquad_samples)
k = self.kernel(self.pred_samples.T, self.candidate_samples.T)
self.tau = k.mean(axis=0)
assert self.tau.shape[0] == self.candidate_samples.shape[1]
        # Note: because tau is simplified down to a single integral instead of
        # the double integral used for u, it is possible for
        # self.u - tau.dot(A_inv.dot(tau)) to be negative if tau is computed
        # using an inaccurate quadrature rule. This is not an issue when using
        # Gauss quadrature
# pred_samples2 = self.generate_random_samples(self.pred_samples.shape[1])
# self.u = np.diag(
# self.kernel(self.pred_samples.T, pred_samples2.T)).mean()
def get_univariate_quadrature_rule(self, ii):
xx_1d, ww_1d = self.univariate_quad_rules[ii](self.degrees[ii]+1)
jj = self.pce.basis_type_index_map[ii]
loc, scale = self.pce.var_trans.scale_parameters[jj, :]
xx_1d = xx_1d*scale+loc
return xx_1d, ww_1d
def precompute_gauss_quadrature(self):
nvars = self.variables.num_vars()
length_scale = self.kernel.length_scale
if np.isscalar(length_scale):
length_scale = [length_scale]*nvars
self.degrees = [self.nquad_samples]*nvars
self.univariate_quad_rules, self.pce = \
get_univariate_quadrature_rules_from_variable(
self.variables, self.degrees)
# dist_func = partial(cdist, metric='sqeuclidean')
self.tau = 1
for ii in range(self.nvars):
# Get 1D quadrature rule
xx_1d, ww_1d = self.get_univariate_quadrature_rule(ii)
# Training samples of ith variable
xtr = self.candidate_samples[ii:ii+1, :]
lscale_ii = length_scale[ii]
# dists_1d_x1_xtr = dist_func(
# xx_1d[:, np.newaxis]/lscale_ii, xtr.T/lscale_ii)
# K = np.exp(-.5*dists_1d_x1_xtr)
K = self.kernels_1d[ii](xx_1d[np.newaxis, :], xtr, lscale_ii)
self.tau *= ww_1d.dot(K)
def objective(self, new_sample_index):
indices = np.concatenate(
[self.pivots, [new_sample_index]]).astype(int)
A = self.A[np.ix_(indices, indices)]
try:
L = np.linalg.cholesky(A)
except:
return np.inf
tau = self.tau[indices]
return -tau.T.dot(cholesky_solve_linear_system(L, tau))
def objective_vals(self):
obj_vals = np.inf*np.ones(self.candidate_samples.shape[1])
for mm in range(self.candidate_samples.shape[1]):
if mm not in self.pivots:
obj_vals[mm] = self.objective(mm)
# assert np.allclose(self.candidate_samples[:,self.pivots],self.training_samples)
# if len(self.pivots)>22:
# I = np.argsort(self.candidate_samples[0,:])
# plt.plot(self.candidate_samples[0,self.pivots],np.ones((len(self.pivots)))*obj_vals.min(),'ko')
# plt.plot(self.candidate_samples[0,I],obj_vals[I])
# J = np.argmin(obj_vals)
# plt.plot(self.candidate_samples[0,J],obj_vals[J], 'rs')
# plt.show()
return obj_vals
def refine_naive(self):
if (self.init_pivots is not None and
len(self.pivots) < len(self.init_pivots)):
pivot = self.init_pivots[len(self.pivots)]
obj_val = self.objective(pivot)
else:
# ntraining_samples = self.ntraining_samples
obj_vals = self.objective_vals()
pivot = np.argmin(obj_vals)
obj_val = obj_vals[pivot]
return pivot, obj_val
def refine_econ(self):
if (self.init_pivots is not None and
len(self.pivots) < len(self.init_pivots)):
pivot = self.init_pivots[len(self.pivots)]
obj_val = self.objective_econ(pivot)
else:
# training_samples = self.ntraining_samples
obj_vals = self.vectorized_objective_vals_econ()
pivot = np.argmin(obj_vals)
obj_val = obj_vals[pivot]
assert np.isfinite(obj_val)
if self.L.shape[0] == 0:
            # the Cholesky factor of the 1x1 matrix [A[pivot, pivot]]
            self.L = np.atleast_2d(np.sqrt(self.A[pivot, pivot]))
else:
A_12 = self.A[self.pivots, pivot:pivot+1]
L_12 = solve_triangular(self.L, A_12, lower=True)
L_22_sq = self.A[pivot, pivot] - L_12.T.dot(L_12)
if L_22_sq <= 0:
# recompute Cholesky from scratch to make sure roundoff error
# is not causing L_22_sq to be negative
indices = np.concatenate([self.pivots, [pivot]]).astype(int)
try:
self.L = np.linalg.cholesky(
self.A[np.ix_(indices, indices)])
except:
return -1, np.inf
L_22 = np.sqrt(L_22_sq)
self.L = np.block(
[[self.L, np.zeros(L_12.shape)],
[L_12.T, L_22]])
assert np.isfinite(self.candidate_y_2[pivot])
self.y_1 = np.concatenate([self.y_1, [self.candidate_y_2[pivot]]])
return pivot, obj_val
def objective_vals_econ(self):
obj_vals = np.inf*np.ones(self.candidate_samples.shape[1])
for mm in range(self.candidate_samples.shape[1]):
if mm not in self.pivots:
obj_vals[mm] = self.objective_econ(mm)
return obj_vals
def vectorized_objective_vals_econ(self):
if self.L.shape[0] == 0:
diag_A = np.diagonal(self.A)
L = np.sqrt(diag_A)
vals = self.tau**2/diag_A
self.candidate_y_2 = self.tau/L
return -vals
A_12 = np.atleast_2d(self.A[self.pivots, :])
L_12 = solve_triangular(self.L, A_12, lower=True)
J = np.where((np.diagonal(self.A)-np.sum(L_12*L_12, axis=0)) <= 0)[0]
self.temp = np.diagonal(self.A)-np.sum(L_12*L_12, axis=0)
useful_candidates = np.ones(
(self.candidate_samples.shape[1]), dtype=bool)
useful_candidates[J] = False
useful_candidates[self.pivots] = False
L_12 = L_12[:, useful_candidates]
L_22 = np.sqrt(np.diagonal(self.A)[useful_candidates] - np.sum(
L_12*L_12, axis=0))
y_2 = (self.tau[useful_candidates]-L_12.T.dot(self.y_1))/L_22
self.candidate_y_2[useful_candidates] = y_2
self.candidate_y_2[~useful_candidates] = np.inf
z_2 = y_2/L_22
vals = np.inf*np.ones((self.candidate_samples.shape[1]))
vals[useful_candidates] = -(
self.best_obj_vals[-1] + self.tau[useful_candidates]*z_2 -
self.tau[self.pivots].dot(
solve_triangular(self.L.T, L_12*z_2, lower=False)))
return vals
def objective_econ(self, new_sample_index):
if self.L.shape[0] == 0:
L = np.sqrt(self.A[new_sample_index, new_sample_index])
self.candidate_y_2[new_sample_index] = self.tau[new_sample_index]/L
val = self.tau[new_sample_index]**2/self.A[
new_sample_index, new_sample_index]
return -val
A_12 = self.A[self.pivots, new_sample_index:new_sample_index+1]
L_12 = solve_triangular(self.L, A_12, lower=True)
L_22 = np.sqrt(
self.A[new_sample_index, new_sample_index] - L_12.T.dot(L_12))
y_2 = (self.tau[new_sample_index]-L_12.T.dot(self.y_1))/L_22[0, 0]
self.candidate_y_2[new_sample_index] = y_2
z_2 = y_2/L_22[0, 0]
val = -(-self.best_obj_vals[-1] + self.tau[new_sample_index]*z_2 -
self.tau[self.pivots].dot(
solve_triangular(self.L.T, L_12*z_2, lower=False)))
return val[0, 0]
def compute_A(self):
self.active_candidates = np.ones(
self.candidate_samples.shape[1], dtype=bool)
self.A = self.kernel(
self.candidate_samples.T, self.candidate_samples.T)
def set_kernel(self, kernel, kernels_1d=None):
self.kernel = kernel
self.kernels_1d = kernels_1d
if self.kernels_1d is None and self.use_gauss_quadrature:
# TODO: remove kernels 1D and just create tensor product
# kernel with this as a property.
assert self.kernel.nu == np.inf
self.kernels_1d = [partial(matern_kernel_1d, np.inf)]*self.nvars
if ((self.use_gauss_quadrature is True) and (self.nvars != 1) and
((type(kernel) != Matern) or (np.isfinite(kernel.nu)))):
            # TODO: To deal with sum kernel with noise, need to amend
# gradient computation which currently assumes no noise
msg = f'GP Kernel type: {type(kernel)} '
msg += 'Only squared exponential kernel supported when '
msg += 'use_gauss_quadrature is True and nvars > 1'
# TODO add other tensor product kernels
raise Exception(msg)
if self.use_gauss_quadrature:
self.precompute_gauss_quadrature()
else:
self.precompute_monte_carlo()
self.compute_A()
# designs are better if a small nugget is added to the diagonal
self.add_nugget()
def add_nugget(self):
self.A[np.arange(self.A.shape[0]), np.arange(self.A.shape[1])] += \
self.nugget
def set_init_pivots(self, init_pivots):
assert len(self.pivots) == 0
self.init_pivots = list(init_pivots)
def update_training_samples(self, pivot):
self.pivots.append(pivot)
# new_sample = self.candidate_samples[:, pivot:pivot+1]
self.training_samples = np.hstack(
[self.training_samples,
self.candidate_samples[:, pivot:pivot+1]])
def __call__(self, nsamples, verbosity=1):
if not hasattr(self, 'kernel'):
raise Exception('Must call set_kernel')
if self.econ is True:
self.refine = self.refine_econ
else:
self.refine = self.refine_naive
flag = 0
self.nsamples_requested.append(nsamples)
ntraining_samples = self.ntraining_samples
for nn in range(ntraining_samples, nsamples):
pivot, obj_val = self.refine()
if pivot < 0:
flag = 1
break
# if self.econ is False:
# flag = 1
# break
# else:
# self.econ = False
# # Switch of econ mode which struggles when condition
# # number is poor
# print('switching naive updating strategy on')
# self.refine = self.refine_naive
# pivot, obj_val = self.refine()
# if pivot < 0:
# flag = 1
# break
if verbosity > 0:
print(f'Iter: {nn}, Objective: {obj_val}')
self.best_obj_vals.append(obj_val)
self.update_training_samples(pivot)
# print(f'Number of points generated {nn+1}')
self.active_candidates[pivot] = False
if self.compute_cond_nums is True:
if self.econ:
self.cond_nums.append(np.linalg.cond(self.L)**2)
else:
self.cond_nums.append(
np.linalg.cond(
self.A[np.ix_(self.pivots, self.pivots)]))
# print(np.linalg.cond(
# self.A[np.ix_(self.pivots, self.pivots)]))
new_samples = self.training_samples[:, ntraining_samples:]
self.ntraining_samples = self.training_samples.shape[1]
return new_samples, flag
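# Illustrative sketch (not part of the original module): greedy
# variance-of-mean design on the canonical [0, 1]^2 domain using Monte Carlo
# quadrature. The kernel, candidate generator and sample sizes are
# assumptions used only for demonstration.
def _example_greedy_variance_of_mean_sampler():
    from sklearn.gaussian_process.kernels import RBF

    def generate_random_samples(nsamples):
        return np.random.uniform(0., 1., (2, nsamples))

    sampler = GreedyVarianceOfMeanSampler(
        2, 1000, 200, generate_random_samples, use_gauss_quadrature=False,
        econ=True, nugget=1e-8)
    sampler.set_kernel(RBF(0.5))
    samples, flag = sampler(10, verbosity=0)
    # samples.shape == (2, 10) when flag == 0
    return samples, flag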
def matern_gradient_wrt_samples(nu, query_sample, other_samples, length_scale):
"""
Parameters
----------
query_sample : np.ndarray (nvars, 1)
other_samples : np.ndarray (nvars, nquery_samples)
length_scale : np.ndarray (nvars)
"""
if type(length_scale) == np.ndarray:
assert length_scale.shape[0] == query_sample.shape[0]
length_scale = np.asarray(length_scale)
dists = cdist(query_sample.T/length_scale, other_samples.T/length_scale,
metric='euclidean')
if nu == 3/2:
tmp1 = np.sqrt(3)*dists
tmp2 = (np.tile(
query_sample.T, (other_samples.shape[1], 1))-other_samples.T)/(
length_scale**2)
K = np.exp(-tmp1)
grad = -3*K.T*tmp2
elif nu == 5/2:
tmp1 = np.sqrt(5)*dists
K = np.exp(-tmp1)
tmp2 = (np.tile(
query_sample.T, (other_samples.shape[1], 1))-other_samples.T)/(
length_scale**2)
grad = -5/3*K.T*tmp2*(np.sqrt(5)*dists+1)
elif nu == np.inf:
tmp2 = (np.tile(
query_sample.T, (other_samples.shape[1], 1))-other_samples.T)/(
length_scale**2)
K = np.exp(-.5 * dists**2)
grad = -K.T*tmp2
else:
raise Exception(f'Matern gradient with nu={nu} not supported')
return grad
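# --- Hedged usage sketch (added for illustration, not part of the original
# module): evaluate the kernel gradient at one query point against three other
# samples in two dimensions. Assumes scipy.spatial.distance.cdist is imported
# at module level, as the function body above requires.
def _demo_matern_gradient():
    query_sample = np.array([[0.1], [0.2]])        # (nvars, 1)
    other_samples = np.array([[0.3, 0.6, 0.9],
                              [0.2, 0.5, 0.8]])    # (nvars, nquery_samples)
    length_scale = np.array([0.5, 0.7])
    grad = matern_gradient_wrt_samples(
        np.inf, query_sample, other_samples, length_scale)
    # one gradient row per column of other_samples -> shape (3, 2)
    return grad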
class GreedyIntegratedVarianceSampler(GreedyVarianceOfMeanSampler):
"""
Parameters
----------
num_vars : integer
The number of dimensions
nquad_samples : integer
The number of samples used to compute the sample based estimate
of the integrated variance (IVAR)
ncandidate_samples : integer
The number of samples used by the greedy downselection procedure
"""
def initialize(self):
self.L = np.zeros((0, 0))
self.L_inv = np.zeros((0, 0))
self.A_inv = np.zeros((0, 0))
def precompute_monte_carlo(self):
self.pred_samples = self.generate_random_samples(
self.nquad_samples)
# lscale = self.kernel.length_scale
# if np.isscalar(lscale):
# lscale = np.array([lscale]*self.nvars)
# dist_func = partial(cdist, metric='sqeuclidean')
# dists_x1_xtr = dist_func(
# self.pred_samples.T/lscale, self.candidate_samples.T/lscale)
# K = np.exp(-.5*dists_x1_xtr)
K = self.kernel(self.pred_samples.T, self.candidate_samples.T)
ww = np.ones(self.pred_samples.shape[1])/self.pred_samples.shape[1]
self.P = K.T.dot(ww[:, np.newaxis]*K)
def precompute_gauss_quadrature(self):
self.degrees = [self.nquad_samples]*self.nvars
length_scale = self.kernel.length_scale
if np.isscalar(length_scale):
length_scale = np.array([length_scale]*self.nvars)
self.univariate_quad_rules, self.pce = \
get_univariate_quadrature_rules_from_variable(
self.variables, self.degrees)
self.P = 1
for ii in range(self.nvars):
xx_1d, ww_1d = self.get_univariate_quadrature_rule(ii)
xtr = self.candidate_samples[ii:ii+1, :]
K = self.kernels_1d[ii](
xx_1d[np.newaxis, :], xtr, length_scale[ii])
P_ii = K.T.dot(ww_1d[:, np.newaxis]*K)
self.P *= P_ii
def objective(self, new_sample_index):
indices = np.concatenate(
[self.pivots, [new_sample_index]]).astype(int)
A = self.A[np.ix_(indices, indices)]
A_inv = np.linalg.inv(A)
P = self.P[np.ix_(indices, indices)]
# P1=1
# length_scale = self.kernel.length_scale
# if np.isscalar(length_scale):
# length_scale = np.array([length_scale]*self.nvars)
# for ii in range(self.nvars):
# xx_1d, ww_1d = self.get_univariate_quadrature_rule(ii)
# xtr = self.candidate_samples[ii:ii+1, indices]
# K = self.kernels_1d[ii](
# xx_1d[np.newaxis, :], xtr, length_scale[ii])
# P_ii = K.T.dot(ww_1d[:, np.newaxis]*K)
# P1*=P_ii
# assert np.allclose(P, P1)
return -np.trace(A_inv.dot(P))
def objective_econ(self, new_sample_index):
if self.L_inv.shape[0] == 0:
val = self.P[new_sample_index, new_sample_index]/self.A[
new_sample_index, new_sample_index]
return -val
A_12 = self.A[self.pivots, new_sample_index:new_sample_index+1]
L_12 = solve_triangular(self.L, A_12, lower=True)
L_22 = np.sqrt(
self.A[new_sample_index, new_sample_index] - L_12.T.dot(L_12))
C = -np.dot(L_12.T/L_22, self.L_inv)
# TODO set self.P_11 when pivot is chosen so do not constantly
# have to reduce matrix
P_11 = self.P[np.ix_(self.pivots, self.pivots)]
P_12 = self.P[self.pivots, new_sample_index:new_sample_index+1]
P_22 = self.P[new_sample_index, new_sample_index]
val = -(-self.best_obj_vals[-1] + np.sum(C.T.dot(C)*P_11) +
2*np.sum(C.T/L_22*P_12) + 1/L_22**2*P_22)
return val[0, 0]
def vectorized_objective_vals_econ(self):
if self.L_inv.shape[0] == 0:
vals = np.diagonal(self.P)/np.diagonal(self.A)
return -vals
A_12 = np.atleast_2d(self.A[self.pivots, :])
L_12 = solve_triangular(self.L, A_12, lower=True)
J = np.where((np.diagonal(self.A)-np.sum(L_12*L_12, axis=0)) <= 0)[0]
self.temp = np.diagonal(self.A)-np.sum(L_12*L_12, axis=0)
useful_candidates = np.ones(
(self.candidate_samples.shape[1]), dtype=bool)
useful_candidates[J] = False
useful_candidates[self.pivots] = False
L_12 = L_12[:, useful_candidates]
L_22 = np.sqrt(np.diagonal(self.A)[useful_candidates] - np.sum(
L_12*L_12, axis=0))
P_11 = self.P[np.ix_(self.pivots, self.pivots)]
P_12 = self.P[np.ix_(self.pivots, useful_candidates)]
P_22 = np.diagonal(self.P)[useful_candidates]
C = -np.dot((L_12/L_22).T, self.L_inv)
vals = np.inf*np.ones((self.candidate_samples.shape[1]))
vals[useful_candidates] = -(
-self.best_obj_vals[-1] +
np.sum(C.T*P_11.dot(C.T), axis=0) +
2*np.sum(C.T/L_22*P_12, axis=0) + 1/L_22**2*P_22)
return vals
def refine_econ(self):
if (self.init_pivots is not None and
len(self.pivots) < len(self.init_pivots)):
pivot = self.init_pivots[len(self.pivots)]
obj_val = self.objective_econ(pivot)
else:
# training_samples = self.ntraining_samples
obj_vals = self.vectorized_objective_vals_econ()
# obj_vals = self.objective_vals_econ()
pivot = np.argmin(obj_vals)
obj_val = obj_vals[pivot]
if not np.isfinite(obj_val): # or obj_val < -1:
# ill conditioning causes obj_val to go below -1 which should not
# be possible
return -1, np.inf
if self.L_inv.shape[0] == 0:
self.L = np.atleast_2d(self.A[pivot, pivot])
self.L_inv = np.atleast_2d(1/self.A[pivot, pivot])
return pivot, obj_val
A_12 = self.A[self.pivots, pivot:pivot+1]
L_12 = solve_triangular(self.L, A_12, lower=True)
L_22_sq = self.A[pivot, pivot] - L_12.T.dot(L_12)
if L_22_sq <= 0:
# recompute Cholesky from scratch to make sure roundoff error
# is not causing L_22_sq to be negative
indices = np.concatenate([self.pivots, [pivot]]).astype(int)
try:
self.L = np.linalg.cholesky(self.A[np.ix_(indices, indices)])
except:
return -1, np.inf
self.L_inv = np.linalg.inv(self.L)
return pivot, obj_val
L_22 = np.sqrt(L_22_sq)
self.L = np.block(
[[self.L, np.zeros(L_12.shape)],
[L_12.T, L_22]])
indices = np.concatenate([self.pivots, [pivot]]).astype(int)
L_22_inv = np.linalg.inv(L_22)
self.L_inv = np.block(
            [[self.L_inv, np.zeros(L_12.shape)],
             # completion of the truncated update (assumption): standard inverse
             # of a block lower-triangular Cholesky factor
             [-np.dot(L_22_inv.dot(L_12.T), self.L_inv), L_22_inv]])
        return pivot, obj_val
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.transforms import transforms
import numpy as np
import cv2
from functools import partial
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from models import Vgg16Conv
from models import Vgg16Deconv
from utils import decode_predictions
def load_images(img_path):
# imread from img_path
img = cv2.imread(img_path)
img = cv2.resize(img, (224, 224))
    # PyTorch ImageNet models expect the input normalized with
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
])
img = transform(img)
img.unsqueeze_(0)
#img_s = img.numpy()
#img_s = np.transpose(img_s, (1, 2, 0))
#cv2.imshow("test img", img_s)
#cv2.waitKey()
return img
def store(model):
"""
make hook for feature map
"""
def hook(module, input, output, key):
if isinstance(module, nn.MaxPool2d):
model.feature_maps[key] = output[0]
model.pool_locs[key] = output[1]
else:
model.feature_maps[key] = output
for idx, layer in enumerate(model._modules.get('features')):
# _modules returns an OrderedDict
layer.register_forward_hook(partial(hook, key=idx))
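# --- Hedged usage sketch (added): load an image, register the feature-map hooks,
# and run a forward pass. The image path is hypothetical, and Vgg16Conv is assumed
# to be constructible without arguments and to define the feature_maps / pool_locs
# dicts that the hook above writes into.
def _demo_forward_pass():
    img = load_images('./data/cat.jpg')   # hypothetical path
    vgg16_conv = Vgg16Conv()              # assumed no-arg constructor
    vgg16_conv.eval()
    store(vgg16_conv)                     # register hooks that fill feature_maps / pool_locs
    with torch.no_grad():
        conv_output = vgg16_conv(img)
    return conv_output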
def vis_layer(layer, vgg16_conv, vgg16_deconv):
"""
    visualize the deconvolution result for the given layer
"""
num_feat = vgg16_conv.feature_maps[layer].shape[1]
# set other feature map activations to zero
new_feat_map = vgg16_conv.feature_maps[layer].clone()
# choose the max activations map
act_lst = []
for i in range(0, num_feat):
choose_map = new_feat_map[0, i, :, :]
activation = torch.max(choose_map)
act_lst.append(activation.item())
act_lst = np.array(act_lst)
    mark = np.argmax(act_lst)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D, proj3d
from scipy.stats import multivariate_normal
from matplotlib.patches import FancyArrowPatch
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
if __name__ == '__main__':
X = np.linspace(-5.5, 5.5, 100)
Y = np.linspace(-5.5, 5.5, 100)
X, Y = np.meshgrid(X, Y)
S = np.transpose(np.stack((X, Y)))
    Z = -3.0 * multivariate_normal.pdf(S, mean=[0, 0], cov=np.matrix([[4, 2], [2, 4]]))
"""
Module for generating lists of frames using frame features, pca, kmeans, etc.
"""
import attr
import cattr
import itertools
import logging
import numpy as np
import random
from time import time
from typing import Dict, List, Optional, Tuple
import cv2
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from skimage import draw
from skimage.feature import hog
from skimage.util.shape import view_as_windows
from sleap.io.video import Video
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True)
class BriskVec:
brisk_threshold: int
vocab_size: int
debug: bool = False
def __attrs_post_init__(self):
self._brisk = cv2.BRISK_create(thresh=self.brisk_threshold)
def get_vecs(self, imgs):
all_descs = []
row_img = []
# Create matrix with multiple brisk descriptors for each image.
for i, img in enumerate(imgs):
kps, descs = self._brisk.detectAndCompute(img, None)
# Brisk descriptor is 512 bits, but opencv returns this as 16 uint8's,
# so we'll convert it to discrete numbers.
descs = np.unpackbits(descs, axis=1)
# Make list with all brisk descriptors (or all images) and map which
# tells us which descriptor goes with which image
row_img.extend([i] * len(descs))
all_descs.append(descs)
# Convert to single matrix of descriptors
all_descs = np.concatenate(all_descs)
# Convert to single matrix of row (individual descriptor) -> image index
row_img = np.array(row_img)
# Create a bag of features for each image by clustering the brisk image
# descriptors (these clusters will be the "words" in a bag of words for
# each image), then generate vocab-length vector for each image which
# represents whether the "word" (i.e., brisk feature in some cluster)
# is present in the image.
kmeans = KMeans(n_clusters=self.vocab_size).fit(all_descs)
return self.clusters_to_vecs(kmeans.labels_, row_img, len(imgs))
# img_bags = np.zeros((len(imgs), self.vocab_size), dtype="bool")
#
# for i in range(len(imgs)):
# img_words = kmeans.labels_[row_img == i]
# img_bags[(i,), img_words] = 1
#
# return img_bags
def clusters_to_vecs(self, cluster_labels, ownership, img_count):
# Make helper function that builds bag of features vector for a single
# image by looking up all the descriptors for an image and counting
# how many there are for each cluster (vocab word).
def img_bof_vec(img_idx):
return np.bincount(
cluster_labels[ownership == img_idx], minlength=self.vocab_size
)
# Now make the matrix with a bag of features vector for each image
return np.stack([img_bof_vec(i) for i in range(img_count)])
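# --- Hedged usage sketch (added): bag-of-BRISK-features vectors for a small stack
# of frames. Parameter values are illustrative only; real frames with texture are
# needed, since BRISK may detect no keypoints on flat images.
def _demo_brisk_vec(imgs):
    brisk_vec = BriskVec(brisk_threshold=30, vocab_size=8, debug=False)
    vecs = brisk_vec.get_vecs(imgs)   # shape: (len(imgs), 8) counts per vocab word
    return vecs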
@attr.s(auto_attribs=True)
class HogVec:
brisk_threshold: int
vocab_size: int
debug: bool = False
def __attrs_post_init__(self):
self._brisk = cv2.BRISK_create(thresh=self.brisk_threshold)
self.points_list = []
self.cmap = [
[31, 120, 180],
[51, 160, 44],
[227, 26, 28],
[255, 127, 0],
[106, 61, 154],
[177, 89, 40],
[166, 206, 227],
[178, 223, 138],
[251, 154, 153],
[253, 191, 111],
[202, 178, 214],
[255, 255, 153],
]
def get_vecs(self, imgs):
# Get matrix of hog descriptors for all images, and array which says
# which image is the source for each row.
descs, ownership = self.get_hogs(imgs)
# Cluster the descriptors into a vocabulary for bag of features
kmeans = KMeans(n_clusters=self.vocab_size).fit(descs)
if self.debug:
if imgs.shape[-1] == 1:
new_shape = (imgs.shape[0], imgs.shape[1], imgs.shape[2], 3)
self.vis = np.empty(new_shape, dtype=imgs.dtype)
self.vis[..., 0] = imgs[..., 0]
self.vis[..., 1] = imgs[..., 0]
self.vis[..., 2] = imgs[..., 0]
else:
self.vis = np.copy(imgs)
for i, img in enumerate(self.vis):
img_desc_clusters = kmeans.labels_[ownership == i]
img_points = self.points_list[i]
for point, cluster in zip(img_points, img_desc_clusters):
color = self.cmap[cluster % len(self.cmap)]
cv2.circle(img, tuple(point), 3, color, lineType=cv2.LINE_AA)
return self.clusters_to_vecs(kmeans.labels_, ownership, len(imgs))
def clusters_to_vecs(self, cluster_labels, ownership, img_count):
# Make helper function that builds bag of features vector for a single
# image by looking up all the descriptors for an image and counting
# how many there are for each cluster (vocab word).
def img_bof_vec(img_idx):
return np.bincount(
cluster_labels[ownership == img_idx], minlength=self.vocab_size
)
# Now make the matrix with a bag of features vector for each image
return np.stack([img_bof_vec(i) for i in range(img_count)])
def get_hogs(self, imgs):
"""Returns descriptors and corresponding image for all images."""
per_image_hog_descriptors = [self.get_image_hog(img) for img in imgs]
descs = np.concatenate(
[image_descs for image_descs in per_image_hog_descriptors]
)
ownership = np.array(
list(
itertools.chain.from_iterable(
[
[i] * len(image_descs)
for i, image_descs in enumerate(per_image_hog_descriptors)
]
)
)
)
return descs, ownership
def get_image_hog(self, img):
"""Returns hog descriptor for all brisk keypoints on single image."""
points = self.get_brisk_keypoints_as_points(img)
center_points = points + np.array([8, 8])
crops = self.get_image_crops(img, center_points)
multichannel = img.ndim > 2
img_descs = np.stack(
[
hog(
crop,
orientations=8,
pixels_per_cell=(16, 16),
cells_per_block=(1, 1),
visualize=False,
multichannel=multichannel,
)
for crop in crops
]
)
return img_descs
def get_image_crops(self, img, points):
"""Returns stack of windows around keypoints on single image."""
W = view_as_windows(img, (16, 16, img.shape[-1]))[..., 0, :, :, :]
max_y = W.shape[1] - 1
max_x = W.shape[0] - 1
xs = points[:, 0]
ys = points[:, 1]
# Shift crops for keypoints that are too close to edges
# TODO: is this how we should handle this case?
xs[xs > max_x] = max_x
ys[ys > max_y] = max_y
return W[xs, ys]
def get_brisk_keypoints_as_points(self, img):
"""Returns matrix of brisk keypoints for single image."""
kps = self._brisk.detect(img)
points = self.keypoints_to_points_matrix(kps)
return points
def keypoints_to_points_matrix(self, kps):
points = np.round(np.array([kps[idx].pt for idx in range(0, len(kps))])).astype(
np.int
)
self.points_list.append(points)
return points
@attr.s(auto_attribs=True, frozen=True)
class FrameItem(object):
"""Just a simple wrapper for (video, frame_idx), plus method to get image."""
video: Video
frame_idx: int
def get_raw_image(self, scale: float = 1.0):
if scale == 1.0:
return self.video[self.frame_idx]
else:
img = self.video[self.frame_idx]
_, h, w, c = img.shape
h_, w_ = int(h // (1 / scale)), int(w // (1 / scale))
# note that cv2 expects (width, height) instead of (rows, columns)
img = cv2.resize(np.squeeze(img), (w_, h_))[None, ...]
if c == 1:
img = img[..., None]
return img
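# --- Hedged usage sketch (added): wrap one frame of a video and fetch it at half
# resolution. The file name is hypothetical; Video.from_filename is assumed to be
# the usual SLEAP video constructor.
def _demo_frame_item():
    video = Video.from_filename("session1.mp4")   # hypothetical file
    item = FrameItem(video=video, frame_idx=0)
    img = item.get_raw_image(scale=0.5)           # (1, rows//2, cols//2, channels)
    return img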
@attr.s(auto_attribs=True)
class FrameGroupSet(object):
"""
Class for a set of groups of FrameItem objects.
Each item can have at most one group; each group is represented as an int.
Attributes:
method: Label for the method used to generate group set.
item_group: Dictionary which maps each item to its group.
group_data: Dictionary of any extra data for each group;
keys are group ids, values are dictionaries of data.
groupset_data: Dictionary for any data about the entire set of groups.
"""
method: str
item_group: Dict[FrameItem, int] = attr.ib(default=attr.Factory(dict))
group_data: Dict[int, dict] = attr.ib(default=attr.Factory(dict))
groupset_data: Dict = attr.ib(default=attr.Factory(dict))
def append_to_group(self, group: int, item: FrameItem):
"""Adds item to group."""
self.item_group[item] = group
if group not in self.group_data:
self.group_data[group] = dict()
def extend_group_items(self, group: int, item_list: List[FrameItem]):
"""Adds all items in list to group."""
for item in item_list:
self.append_to_group(group, item)
def get_item_group(self, item: FrameItem):
"""Returns group that contain item."""
return self.item_group.get(item, None)
@property
def groups(self):
"""Iterate over groups, yielding group and list of items."""
for group in self.group_data.keys():
item_list = [
frame_item
for (frame_item, frame_group) in self.item_group.items()
if frame_group == group
]
yield group, item_list
@property
def all_items(self):
"""Gets list of all items."""
return list(itertools.chain(self.item_group.keys()))
def sample(self, per_group: int, unique_samples: bool = True):
"""
Returns new FrameGroupSet with groups sampled from current groups.
Note that the order of items in the new groups will not match order of
items in the groups from which samples are drawn.
Args:
per_group: The number of samples to take from each group.
unique_samples: Whether to ensure that there are no shared items
in the resulting groups.
Returns:
New FrameGroupSet.
"""
new_groupset = FrameGroupSet(method="sample_groups")
new_groupset.groupset_data["per_group"] = per_group
selected_set = set()
for group, group_item_list in self.groups:
if unique_samples:
# Remove items that were already sampled from other groups
group_item_list = list(set(group_item_list) - selected_set)
# Sample items from this group
samples_from_group = np.random.choice(
group_item_list, min(len(group_item_list), per_group), False
)
# Keep track of the items we sampled so far from any group
selected_set = selected_set.union(set(samples_from_group))
# Add this sampled group to the new set of groups
# samples_from_group.sort()
new_groupset.extend_group_items(group, list(samples_from_group))
return new_groupset
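# --- Hedged usage sketch (added): put frames from a hypothetical SLEAP video into
# two groups and draw at most two unique samples per group.
def _demo_frame_group_set(video):
    group_set = FrameGroupSet(method="manual")
    group_set.extend_group_items(0, [FrameItem(video, i) for i in range(5)])
    group_set.extend_group_items(1, [FrameItem(video, i) for i in range(5, 8)])
    sampled = group_set.sample(per_group=2, unique_samples=True)
    return {group: [it.frame_idx for it in items] for group, items in sampled.groups}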
@attr.s(auto_attribs=True)
class ItemStack(object):
"""
Container for items, each item can "own" one or more rows of data.
Attributes:
items: The list of items
data: An ndarray with rows of data corresponding to items.
ownership: List which specifies which rows of data correspond to which
items.
meta: List which stores metadata about each operation on stack.
group_sets: List of GroupSets of items.
"""
items: List = attr.ib(default=attr.Factory(list))
data: Optional[np.ndarray] = attr.ib(default=None, repr=False)
ownership: Optional[List[tuple]] = None
meta: List = attr.ib(default=attr.Factory(list))
group_sets: List[FrameGroupSet] = attr.ib(default=attr.Factory(list))
@property
def current_groupset(self):
"""Gets current (most recent) group set."""
if not self.group_sets:
return None
return self.group_sets[-1]
def get_item_data_idxs(self, item):
"""Returns indexes of rows in data which belong to item."""
item_idx = self.items.index(item)
if self.ownership:
owns = self.ownership[item_idx]
else:
owns = tuple([item_idx])
return owns
def get_item_data(self, item):
"""Returns rows of data which belong to item."""
owns = self.get_item_data_idxs(item)
return self.data[owns, ...]
def get_item_by_data_row(self, row_idx):
if self.ownership:
for item_idx, owns in enumerate(self.ownership):
if row_idx in owns:
return self.items[item_idx]
elif len(self.items) > row_idx:
return self.items[row_idx]
raise IndexError(f"No ownership for row {row_idx}.")
def extend_ownership(self, ownership, row_count):
"""Extends an ownership list with number of rows owned by next item."""
start_i = 0
if len(ownership):
# Start at 1 + (last row index of last item so far)
start_i = 1 + ownership[-1][-1]
item_owns = list(range(start_i, start_i + row_count))
ownership.append(item_owns)
def get_raw_images(self, scale=0.5):
"""Sets data to raw image for each FrameItem."""
self.meta.append(dict(action="raw_images"))
data_shape = [1, 1, 1]
mixed_shapes = False
imgs = []
for frame in self.items:
# Add to list of raw images
img = frame.get_raw_image(scale=scale)
imgs.append(img)
# Keep track of shape large enough to hold any of the images
img_shape = img.shape
data_shape = [max(data_shape[i], img_shape[i + 1]) for i in (0, 1, 2)]
if data_shape != img_shape:
mixed_shapes = True
if mixed_shapes:
# Make array large enough to hold any image and pad smaller images
self.data = np.zeros((len(self.items), *data_shape), dtype="uint8")
for i, img in enumerate(imgs):
_, rows, columns, channels = img.shape
self.data[i, :rows, :columns, :channels] = img
else:
self.data = np.concatenate(imgs)
def flatten(self):
"""Flattens each row of data to 1-d array."""
meta = dict(action="flatten", shape=self.data.shape[1:])
self.meta.append(meta)
row_count = self.data.shape[0]
        row_size = np.product(meta["shape"])
        # completion of the truncated method (assumption): flatten to 2-d rows
        self.data = self.data.reshape(row_count, row_size)
import pytest
import sys
import os
from math import trunc, ceil, floor
import numpy as np
sys.path.insert(0, os.getcwd())
from uncvalue import Value, val, unc, set_unc # noqa: E402
ϵ = 1e-8
a = Value(3.1415, 0.0012)
b = Value(-1.618, 0.235)
c = Value(3.1264e2, 1.268)
A = np.array([[a, a], [b, b], [c, c]])
B = Value([a.x] * 5, a.ux)
C = Value([b.x] * 5, [b.ux] * 5)
@pytest.mark.parametrize('v, x', [
(a, a.x),
(A, np.array([[a.x, a.x], [b.x, b.x], [c.x, c.x]])),
(B, a.x),
(a.x, a.x)],
ids=['Single', 'Array of values', 'Value array', 'Number'])
def test_val(v, x):
assert np.all(val(v) == x)
@pytest.mark.parametrize('v, x', [
(a, a.ux),
(A, np.array([[a.ux, a.ux], [b.ux, b.ux], [c.ux, c.ux]])),
(B, a.ux),
(a.x, 0)],
ids=['Single', 'Array of values', 'Value array', 'Number'])
def test_unc(v, x):
assert np.all(unc(v) == x)
def test_set_unc():
v = set_unc(0.234, 0.0052)
assert isinstance(v, Value)
assert v.x == 0.234
assert v.ux == 0.0052
v = set_unc(a, 0.0052)
assert isinstance(v, Value)
assert v.x == a.x
assert v.ux == 0.0052
v = set_unc([0.234] * 8, 0.0052)
assert isinstance(v, np.ndarray)
assert v.shape == (8, )
assert np.mean(unc(v)) == 0.0052
v = set_unc([0.234] * 8, [0.0052] * 8)
assert isinstance(v, np.ndarray)
assert v.shape == (8, )
assert np.mean(unc(v)) == 0.0052
with pytest.raises(ValueError):
set_unc(np.random.random((3, 2, 1)), np.random.random((4, 2, 1)))
def test_constructor():
v = Value(3.1415, 0.0012)
assert v.x == 3.1415 == v.val
assert v.ux == 0.0012 == v.unc
with pytest.raises(ValueError):
Value(3.14, -0.28)
V = Value([3.1415] * 8, 0.0012)
assert V.x.shape == (8, )
assert V.ux.shape == (8, )
assert np.mean(V.ux) == 0.0012
V = Value([3.1415] * 8, [0.0012] * 8)
assert V.x.shape == (8, )
assert V.ux.shape == (8, )
assert np.mean(V.ux) == 0.0012
with pytest.raises(ValueError):
Value(np.random.random((3, 2, 1)), np.random.random((4, 2, 1)))
with pytest.raises(ValueError):
Value(1j, 0)
Value(1, 2j)
@pytest.mark.parametrize('x, y, r', [
(a.x, a, False),
(a, a.x, False),
(a, Value(a.x, a.ux * 5), False),
(b, a, True),
(a, a - 0.0001, False),
(A, A, False),
(B, C, False)],
ids=['Right', 'Left', 'Both', 'Different', 'Within unc', 'Array eq', 'Array dif'])
def test_smaller(x, y, r):
assert np.all((x < y) == r)
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(a.x + 1, a.ux)),
(a, 1, Value(a.x + 1, a.ux)),
(a, b, Value(a.x + b.x, np.hypot(a.ux, b.ux))),
(1, A, np.array([[a+1, a+1], [b+1, b+1], [c+1, c+1]])),
(a, A, np.array([[a+a, a+a], [b+a, b+a], [c+a, c+a]])),
(1, B, Value(1 + B.x, B.ux)),
(a, B, Value(a.x + B.x, np.hypot(a.ux, B.ux))),
(A, A, np.array([[a+a, a+a], [b+b, b+b], [c+c, c+c]])),
(B, C, Value(B.x + C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Number + Array', 'Value + Array', 'Array of values',
'Number + Valued array', 'Value + Valued array', 'Valued array'])
def test_add(x, y, r):
z = x + y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(a.x + 1, a.ux)),
(a.copy(), 1, Value(a.x + 1, a.ux)),
(a.copy(), b, Value(a.x + b.x, np.hypot(a.ux, b.ux))),
(B.copy(), C, Value(B.x + C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_iadd(x, y, r):
x += y
assert isinstance(x, Value)
assert np.all(x.x == r.x)
assert np.all(x.ux == r.ux)
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(1 - a.x, a.ux)),
(a, 1, Value(a.x - 1, a.ux)),
(a, b, Value(a.x - b.x, np.hypot(a.ux, b.ux))),
(A, A, np.array([[a-a, a-a], [b-b, b-b], [c-c, c-c]])),
(B, C, Value(B.x - C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array of values', 'Valued array'])
def test_sub(x, y, r):
z = x - y
assert np.all(val(z) == val(r))
assert np.all(unc(z) == unc(r))
@pytest.mark.parametrize('x, y, r', [
(1, a, Value(1 - a.x, a.ux)),
(a.copy(), 1, Value(a.x - 1, a.ux)),
(a.copy(), b, Value(a.x - b.x, np.hypot(a.ux, b.ux))),
(B.copy(), C, Value(B.x - C.x, np.hypot(B.ux, C.ux))),
],
ids=['Right', 'Left', 'Both', 'Array'])
def test_isub(x, y, r):
x -= y
assert isinstance(x, Value)
    assert np.all(x.ux == r.ux)
import numpy as np
import pandas as pd
from scipy import optimize
from scipy.stats import norm, truncnorm, t, chi2
from scipy.stats import multivariate_normal as MVN
from scipy.linalg import cholesky
from scipy.integrate import quad
from scipy.optimize import minimize_scalar
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
from time import time
def rvec(x):
return np.atleast_2d(x)
def cvec(x):
return rvec(x).T
def vprint(stmt, verbose=True):
if verbose:
print(stmt)
class BVN():
def __init__(self, mu, sigma, rho):
"""
mu: array of means
sigma: array of variances
rho: correlation coefficient
"""
if isinstance(mu,list):
mu, sigma = np.array(mu), np.array(sigma)
assert mu.shape[0]==sigma.shape[0]==2
assert np.abs(rho) <= 1
self.mu = mu.reshape([1,2])
self.sigma = sigma.flatten()
od = rho*np.sqrt(sigma.prod())
self.rho = rho
self.Sigma = np.array([[sigma[0],od],[od, sigma[1]]])
self.A = cholesky(self.Sigma) # A.T.dot(A) = Sigma
# size=1000;seed=1234 # del size, seed
def rvs(self, size, seed=None):
"""
size: number of samples to simulate
seed: to pass onto np.random.seed
"""
np.random.seed(seed)
X = np.random.randn(size,2)
Z = self.A.T.dot(X.T).T + self.mu
return Z
def imills(self, a):
return norm.pdf(a)/norm.cdf(-a)
def sheppard(self, theta, h, k):
return (1/(2*np.pi))*np.exp(-0.5*(h**2+k**2-2*h*k*np.cos(theta))/(np.sin(theta)**2))
# h, k = -2, -np.infty
def orthant(self, h, k, method='scipy'):
# P(X1 >= h, X2 >=k)
assert method in ['scipy','cox','sheppard']
if isinstance(h,int) or isinstance(h, float):
h, k = np.array([h]), np.array([k])
else:
assert isinstance(h,np.ndarray) and isinstance(k,np.ndarray)
assert len(h) == len(k)
# assert np.all(h >= 0) and np.all(k >= 0)
# Calculate the number of standard deviations away it is
Y = (np.c_[h, k] - self.mu)/np.sqrt(self.sigma)
Y1, Y2 = Y[:,0], Y[:,1]
# (i) scipy: L(h, k)=1-(F1(h)+F2(k))+F12(h, k)
if method == 'scipy':
sp_bvn = MVN([0, 0],[[1,self.rho],[self.rho,1]])
pval = 1+sp_bvn.cdf(Y)-(norm.cdf(Y1)+norm.cdf(Y2))
return pval
# A Simple Approximation for Bivariate and Trivariate Normal Integrals
if method == 'cox':
mu_a = self.imills(Y1)
root = np.sqrt(1-self.rho**2)
xi = (self.rho * mu_a - Y2) / root
pval = norm.cdf(-Y1) * norm.cdf(xi)
return pval
if method == 'sheppard':
pval = np.array([quad(self.sheppard, np.arccos(self.rho), np.pi, args=(y1,y2))[0] for y1, y2 in zip(Y1,Y2)])
return pval
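# --- Hedged usage sketch (added): compare the three orthant approximations for a
# standard bivariate normal with correlation 0.5. P(X1>=0, X2>=0) should be close
# to 1/4 + arcsin(rho)/(2*pi) ~= 0.3333.
def _demo_bvn_orthant():
    bvn = BVN(mu=[0.0, 0.0], sigma=[1.0, 1.0], rho=0.5)
    return {m: bvn.orthant(h=0.0, k=0.0, method=m)
            for m in ['scipy', 'cox', 'sheppard']}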
# mu=np.array([[0,0],[1,1]]);tau=np.array([[1,1],[1,1]]);a=np.array([1,1]); b=np.array([np.inf,np.inf])
# mu = np.repeat(0,20).reshape([10,2]); tau=np.linspace(1,2,21)[:-1].reshape([10,2])
# a = np.repeat(1,10); b=np.repeat(np.inf,10)
# self = NTS(mu, tau, a, b)
# mu, tau, a, b = [0,0], [1,1], 1, np.inf
# self = NTS(mu, tau, a, b)
class NTS():
def __init__(self, mu, tau, a, b):
"""
mu: matrix/array of means
tau: matrix/array of standard errors
a: array of lower bounds
b: array of upper bounds
"""
if not (isinstance(mu,np.ndarray) & isinstance(tau,np.ndarray)):
mu, tau = np.array(mu), np.array(tau)
assert mu.shape == tau.shape
if len(mu.shape) == 2:
assert mu.shape[1] == tau.shape[1] == 2
a, b = np.array(a), np.array(b)
else:
assert mu.shape[0] == tau.shape[0] == 2
mu, tau = rvec(mu), rvec(tau)
a, b, = np.array([a]), np.array([b])
self.r = mu.shape[0]
assert self.r == len(a) == len(b)
self.mu, self.tau, self.a, self.b = mu, tau, a.flatten(), b.flatten()
# Truncated normal (Z2)
self.alpha = (self.a - self.mu[:,1]) / self.tau[:,1]
self.beta = (self.b - self.mu[:,1]) / self.tau[:,1]
self.Z = norm.cdf(self.beta) - norm.cdf(self.alpha)
self.Q = norm.pdf(self.alpha) - norm.pdf(self.beta)
# Average will be unweighted combination of the two distributions
self.mu_W = self.mu[:,0] + self.mu[:,1] + self.tau[:,1]*self.Q/self.Z
if np.prod(self.mu_W.shape) == 1:
self.mu_W = self.mu_W[0]
# Distributions
self.dist_X1 = norm(loc=self.mu[:,0], scale=self.tau[:,0])
self.dist_X2 = truncnorm(a=self.alpha, b=self.beta, loc=self.mu[:,1], scale=self.tau[:,1])
# W
self.theta1 = self.mu.sum(1)
self.theta2 = self.mu[:,1]
self.sigma1 = np.sqrt(np.sum(self.tau**2,1))
self.sigma2 = self.tau[:,1]
self.rho = self.sigma2/self.sigma1
# Initialize BVN for CDF
self.di_BVN = {i:BVN(mu=[0,0],sigma=[1,1],rho=self.rho[i]) for i in range(self.r)}
def reshape(self, x):
if isinstance(x, list) | isinstance(x, pd.Series):
            x = np.array(x)
import numpy
import threading
def merge_times(time1,time2):
"""
merge two time vectors and remove duplicates
"""
times = numpy.append(time1,time2)
resortIndices = numpy.argsort(times)
times = times[resortIndices]
diff = numpy.diff(times)
diff = numpy.append([1],diff)# the diff has one less, so we add one more
indices = diff != 0
times = times[indices] # take only those indices which are different from the previous value
return times
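# --- Hedged usage sketch (added): merging two time vectors keeps a single sorted
# copy of every distinct time stamp.
def _demo_merge_times():
    return merge_times(numpy.array([1.0, 2.0, 3.0]),
                       numpy.array([2.0, 2.5]))   # -> array([1., 2., 2.5, 3.])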
class TimeSeries:
def __init__(self,values=[],times=[],allocSize = 10000):
self.allocSize = allocSize
self.times = numpy.asarray(times,dtype=numpy.float64)
self.values = numpy.asarray(values,dtype=numpy.float64)
self.lastValidIndex = self.times.size-1
self.lock = threading.RLock()
def __realloc(self,size=0):
alloc = numpy.full(self.allocSize+size, numpy.nan, dtype=numpy.float64)
with self.lock:
self.values=numpy.append(self.values,alloc)
self.times =numpy.append(self.times,alloc)
def insert(self, values=None, times=None, profiling =None, allowDuplicates=False):
#convert to numpy
if type(times) is not numpy.ndarray:
times = numpy.asarray(times)
if type(values) is not numpy.ndarray:
values = numpy.asarray(values)
#incoming vector must make sense
if values.size != times.size:
return False
if values.size == 0:
return True
with self.lock:
lastOldValueIndex = self.lastValidIndex #remember for later
if not allowDuplicates:
# sort the new values before inserting and remove duplicates of the times
times,indices = numpy.unique(times, return_index=True)
values = values[indices]
else:
#keep the duplicates but sort
sortedIndices = numpy.argsort(times)
times = times[sortedIndices]
values = values[sortedIndices]
remainingSpace = (self.times.size-1)-self.lastValidIndex
newLen = values.size
if remainingSpace < newLen:
self.__realloc(newLen)
if profiling: profiling.lap("alloc")
if lastOldValueIndex==-1 or times[0]>self.times[lastOldValueIndex]:
#simply append the data
start = self.lastValidIndex + 1
self.values[start:start + newLen] = values
self.times[start:start + newLen] = times
self.lastValidIndex += newLen
if profiling: profiling.lap(f"put in array")
else:
#must insert and avoid producing dublicates
insertIndicesLeft = numpy.searchsorted(self.get_times(),times,side="right")
if profiling: profiling.lap(f"searchsorted")
inserted = 0 # the number of elements already inserted, for each already inserted element, we need to add 1 on the insert index for the remaining elements to insert
for idx,newTime,newValue in zip(insertIndicesLeft,times,values):
index = idx + inserted
if not allowDuplicates and self.times[index-1] == newTime:
#this is just a replacement of existing data
self.values[index-1] = newValue
else:
#this is a real new one, we must insert at position index
#make room
self.times[index+1:]=self.times[index:-1]
self.values[index + 1:] = self.values[index:-1]
#put in
self.times[index]=newTime
self.values[index] = newValue
self.lastValidIndex += 1
inserted = inserted+1
if profiling: profiling.lap(f"inserted in array with shift and duplicates")
return True
def delete_area(self,start=None,end=None,includeEnd=True):
#delete all data where start<=times<=end
if type(start) == type(None):
left = 0
else:
left = numpy.searchsorted(self.get_times(), start,side="left")
if type(end) == type(None):
# this is a tail delete that is easy:
print("adjust last valid in des {self.lastValidIndex} -> {left -1}")
self.lastValidIndex = left -1
else:
if includeEnd:
right = numpy.searchsorted(self.get_times(), end, side="right")
else:
right = numpy.searchsorted(self.get_times(),end,side="left")
siz = right-left
self.times[left:self.lastValidIndex - siz+1] = self.times[right:self.lastValidIndex+1]
self.values[left:self.lastValidIndex - siz+1] = self.values[right:self.lastValidIndex+1]
self.lastValidIndex = self.lastValidIndex - siz
return True
def set_masked(self, values, mask):
with self.lock:
#only with indices
if len(values) == len(self.values):
return False
self.values[mask]=values[mask]
return True
def set(self,values=None,times=None,withAllocSpace=False):
"""
Args:
withAllocSpace: if set true, we also make space for additional values
"""
# if both are set at the same time, we assure the right order
#make sure the incoming is in time order and correct type
# there are cases where values and times are written in two steps (e.g. the timeseries.load from npz)
# for those cases we bypass theses extra checks
#type conversion
if type(times) != type(None):
times = numpy.asarray(times,dtype=numpy.float64)
if type(values) != type(None):
values = numpy.asarray(values,dtype=numpy.float64)
#check order if both come in at the same time
if type(times)!= type(None) and type(values)!= type(None) and len(times)>0 and len(values)>0:
orderedIndices = numpy.argsort(times)
times = times[orderedIndices]
values = values[orderedIndices]
with self.lock:
if type(values) != type(None):
if withAllocSpace:
alloc = numpy.full(self.allocSize, numpy.nan, dtype=numpy.float64)
self.values = numpy.append(values, alloc)
else:
self.values = numpy.copy(values)
self.lastValidIndex = len(values) -1
if type(times) != type(None):
if withAllocSpace:
alloc = numpy.full(self.allocSize, numpy.nan, dtype=numpy.float64)
self.times =numpy.append(times, alloc)
else:
self.times = numpy.copy(times)
self.lastValidIndex = len(times) - 1
return True
def get_values(self):
return self.values[0:self.lastValidIndex+1]
def get_times(self):
return self.times[0:self.lastValidIndex+1]
def get_len(self):
return self.lastValidIndex+1
def get_last_time_stamp(self):
if self.lastValidIndex != -1:
return self.times[self.lastValidIndex]
else:
return None
def get(self, start=None, end=None, copy=False, resampleTimes = None, noBins = None, includeIntervalLimits = False, resampleMethod = None, includeAllNan = False):
"""
request data and resample it
typical use cases:
1) give a start and end time and the number of bins: this returns a vector of noBins elements including start and end time
if the data does not have enough points between start and end, we do NOT deliver more
if the data does have more points than noBins between start and end , we downsample by picking values
2) use the resampleTimes option:
give a vector resampleTimes containing time points at which we want to get results, if necessary, the data will be resampled using
the resampleMethod
Args
start [float]: the start time of the data query in epoch seconds
end [float] : the end time of the data query in epoch seconds
copy [bool]: if False, we only get the pointer to the numpy array, not a copy. This saves time, but the data should only be read!
if True we get a full copy of the requested data
resampleTimes [numpy.array float]: the time points to return in the result
noBins : if start and end time is given, we can query a certain number of data points, the dat will be up/down sampled with sample and hold
includeIntervalLimits: if set to true, we will include one more data point each left and right of the requested time
resampleMethod [enum]: how to resample if we need to; options are:
"samplehold" sample and hold
"linear": linear interpolation
"linearfill": linear interpolation and also interpolate "nan" or "inf" values in the original data
"outlier"
includeAllNan: if set true, we will return all existing nan in the requested interval no matter if they match the resampling
... currently only supported for "bin" queries (typically from the UI)
Return [dict]
{"values":[..],"__time":[...]}
"""
with self.lock:
haveData = True
#remainingSpace = numpy.count_nonzero(numpy.isnan(self.times)) #the times will not have any intermediate nan, only at the end
#lastValidIndex = len(self.times)-remainingSpace-1
lastValidIndex = self.lastValidIndex
if type(start) is not type(None):
if start < 0:
# we support the -start time, endtime = None, typically used for streaming
# to query interval from the end
# get the last time and subtract the query time
if lastValidIndex == -1:
start = 0
else:
lastTime = self.times[lastValidIndex]
start = lastTime + start # look back from the end, note that start is negative
if start < 0:
start = 0
startIndex = numpy.searchsorted(self.get_times(), start,"left") #the first index to take
else:
startIndex = 0
if type(end) is not type(None):
endIndex = numpy.searchsorted(self.get_times(), end, side="right") # this endIndex is one more than the last that we take
else:
endIndex = lastValidIndex +1
startIndex = max(startIndex,0) # make sure, startIndex is not negative
if startIndex == endIndex:
haveData = False
else:
#assure limits
if startIndex > lastValidIndex:
#this means, all the data is left from the query, we should no provide data
haveData = False
if endIndex > lastValidIndex+1:
endIndex = lastValidIndex+1
if haveData:
if type(resampleTimes) == type(None):
#print(f"startIdex:{startIndex}:{self.times[startIndex]}, endIndex:{endIndex-1}:{self.times[endIndex-1]}, diff:{self.times[endIndex-1]-self.times[startIndex]}")
#print(f"lastvalid {lastValidIndex}:{self.times[lastValidIndex]} ")
if includeIntervalLimits:
if startIndex != 0:
startIndex = startIndex -1
if endIndex < lastValidIndex +1: #we can go one above, as the linspace and arange do not include the right
endIndex = endIndex +1
if noBins:
#we pick samples only if we have more than requested
if (endIndex-startIndex)>noBins:
if resampleMethod=="outlier":
#take the outliers
firstIndices = numpy.linspace(startIndex, endIndex-1, noBins+1, endpoint=True, dtype=int) # one more than the bins
takeIndices = []
for idxOfIndices in range(len(firstIndices)-1):
startIdx = firstIndices[idxOfIndices]
endIdx = firstIndices[idxOfIndices+1]
values = self.values[startIdx:endIdx]
#take = numpy.argmax(values)+startIdx # take the maximum valur
values = numpy.abs(values-numpy.mean(values)) #find the max deviating value
take = numpy.argmax(values)+startIdx
takeIndices.append(take)
takeIndices = numpy.asarray(takeIndices,dtype=numpy.int)
else:
takeIndices = numpy.linspace(startIndex, endIndex - 1, noBins, endpoint=True, dtype=int)
else:
takeIndices = numpy.arange(startIndex, endIndex) #arange excludes the last
if includeAllNan:
#now look in the full data between start and end and find all Nans there, add those indices to the total take indices
nanIndices = numpy.where(~numpy.isfinite(self.values[startIndex:endIndex]))[0] + startIndex
takeIndices = numpy.append(takeIndices,nanIndices)
takeIndices = numpy.unique(takeIndices) #unique: sort and remove dublicates
times = self.times[takeIndices]
values = self.values[takeIndices]
else:
#takeIndices = numpy.arange(startIndex,endIndex) #arange exludes the last
times = self.times[startIndex:endIndex]
values = self.values[startIndex:endIndex]
else:
#must resample the data
#oldTimes = self.times[startIndex:endIndex]
#oldValues = self.values[startIndex:endIndex]
if resampleMethod == "linear":
values = numpy.interp(resampleTimes,self.get_times(),self.get_values())
elif resampleMethod == "linearfill":
#fill the nans with data
indices = numpy.isfinite(self.get_values())
values = numpy.interp(resampleTimes, self.get_times()[indices], self.get_values()[indices])
else:
#the default is ffill
values = self.__resample_ffill(resampleTimes, self.get_times(), self.get_values())
times = resampleTimes
else:
times=numpy.asarray([])
values=numpy.asarray([])
if copy:
result = {"__time": numpy.copy(times), "values":numpy.copy(values)}
else:
result = {"__time": times, "values": values}
return result
def __resample_ffill(self,newTimes, oldTimes, values):
"""
resample the values along a new time axis
up/down samping is possible, and distances are possible
if upsamping
Args:
newTimes: a numpy.array of time values (typically epoch seconds in UTC
oldTimes: the old time stamps of the values, must have same length as values
values: a numpy array of values to be up/down sampled
method: the method used when up/down sampling. we currently only support the
"ffill":
@downsampling: sample values (we take the last value in time (sample and hold)
@upsampling: we also take the last old value in time (forward fill, sample and hold)
Returns:
numpy array of type values of the length of newTimes with the up/down-sampled values
"""
newValues = numpy.full(len(newTimes), numpy.nan)
# if the searchsorted gives index 0, we must put the new value in front of the old and use nan for the value
# as we don't have any valid previous value, the fastest way to do this is to put a value infront and use
# the searchsorted as a vector process with the "right", e.g. right a[i-1] <= v < a[i],
# so we get the found index i as one bigger than out timestamp (which is then <= the searched time)
# we should actually take the result index -1 to have the right index, but the values have been shifted by one to the right, so
# we are fine
# example:
# oldTimes = 11,12,13
# old values = 11,12,13
# newTimes 10,10.5,11,11.5
# preprocessing values preold= nan,11,12,13
# searchsorted (10,10.5,11,11.5 "right" gives: 0,0,1,1)
# result is preold[0,0,1,1] =[nan,nan,11,11]
myvals = numpy.insert(values, 0, numpy.nan)
found = numpy.searchsorted(oldTimes, newTimes, "right")
newValues = myvals[found]
return newValues
def merge(self,timeseries):
"""
merge another time series into this times series:
the times will be all time points existing
double times will be removed
the new values will get priority on dublicate times
"""
with self.lock:
mergeTimes = merge_times(self.get_times(),timeseries.get_times())
oldValues = self.get(resampleTimes=mergeTimes)["values"]
if len(oldValues) == 0:
#the oldValues is empty, so we create a NaN array
oldValues = numpy.full(len(mergeTimes),numpy.nan,dtype=numpy.float64)
newValues = timeseries.get(resampleTimes=mergeTimes)["values"]
indices=numpy.isfinite(newValues)
oldValues[indices]=newValues[indices]
self.set(oldValues,mergeTimes)
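# --- Hedged usage sketch (added): the forward-fill resampling documented in
# __resample_ffill, queried through get(); times before the first stored sample
# come back as NaN, later times hold the last known value.
def _demo_resample_ffill():
    ts = TimeSeries(values=[11.0, 12.0, 13.0], times=[11.0, 12.0, 13.0])
    res = ts.get(resampleTimes=numpy.asarray([10.0, 10.5, 11.0, 11.5]))
    return res["values"]   # -> array([nan, nan, 11., 11.])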
class TimeSeriesTable:
"""
the TSTable is an API class for the access of tabled data
for any alternative implementation, the api should be implemented such as create, insert etc.
"""
def __init__(self,allocSize = 10000):
self.store = {}
self.allocSize = allocSize
def get_items(self):
"""
Returns:
a list of keys which are the ids of the timeseries stored here
"""
return list(self.store.keys()) # this makes a copy
def create(self,name,allocSize=None):
if type(allocSize) is type(None):
allocSize = self.allocSize
self.store[name]=TimeSeries(allocSize=allocSize)
return True
def delete(self,name):
if name in self.store:
del self.store[name]
return True
def clear(self):
self.store={}
def insert(self,name,values=None,times=None,allowDuplicates = False):
if name not in self.store:
self.create(name)
return self.store[name].insert(values,times,allowDuplicates = allowDuplicates)
def append(self, name, values=None, times=None):
if name not in self.store:
self.create(name)
return self.store[name].insert(values,times)
def set(self,name,values = None,times = None):
return self.store[name].set(values,times)
def get_table(self, names, start=None, end=None, copy=False, resampleTimes=None, noBins = None, includeIntervalLimits=False,resampleMethod = None, includeAllNan = False):
"""
returns raw data dict with {name:{"values":[..],"__time":[...], "name2":{"values":[..], "__time":[..]
"""
if not type(names) is list:
names = [names]
result = {}
#we are handling here a special case of a time query for the "end" of data via end=None, start =-time
# we must cover the case where the requested variables have different end times, we will use the one with the newest data a a reference for all of them
if (type(end) is type(None)) and (not type(start) is type(None)):
#this is a streaming request, get the newest possible time point of all vars
lastsraw = [self.store[name].get_last_time_stamp() for name in names if name in self.store]
lasts = []
for last in lastsraw:
if type(last) is type(None):
continue
if numpy.isfinite(last):
lasts.append(last)
if lasts != []:
end = numpy.max(lasts)
start = end + start #(start was negative)
else:
#all are empty, the store.get will handle this
pass
for name in names:
if name in self.store:
includeNan = False
if includeAllNan == True:
includeNan = True
if type(includeAllNan) is list:
#it is a selection list:
if name in includeAllNan:
includeNan = True
result[name]=self.store[name].get(start=start,end=end,copy=copy,resampleTimes=resampleTimes,noBins=noBins,includeIntervalLimits=includeIntervalLimits,resampleMethod=resampleMethod,includeAllNan=includeNan)
return result
def get_info(self,name = None):
if not name:
s=""
for k,v in self.store.items():
s=s+f"{k}: {v.get()}"+"\n"
return s
else:
if not name in self.store:
return f"time series data not found"
else:
dat = self.store[name].get()['values']
return f"time series len {len(dat)}, data:{dat[0:min(len(dat),10)]}"
def delete_area(self,name,start=None,end=None):
if name not in self.store:
return False
return self.store[name].delete_area(start,end)
def insert_blobs(self,blobs):
""" blob is a dict or list of dicts of key and values containing one time base like
{
"root.vars.a": [1.5,1.6,1.7]m
"root.vars.b": [2,3,4]
"__time" :[100001,100002,100003]
}
"""
if not type(blobs) is list:
blobs = [blobs]
return [self.__insert_blob(b) for b in blobs]
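    # --- Hedged usage sketch (added), matching the blob format documented above:
    #   tst = TimeSeriesTable()
    #   tst.insert_blobs({
    #       "root.vars.a": [1.5, 1.6, 1.7],
    #       "root.vars.b": [2, 3, 4],
    #       "__time": [100001, 100002, 100003],
    #   })
    #   tst.get_table(["root.vars.a"])  # {"root.vars.a": {"values": ..., "__time": ...}}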
def __insert_blob(self,blob):
if not "__time" in blob:
return False
for k,v in blob.items():
if numpy.isscalar(v):
blob[k]=numpy.asarray([v])
lens = [len(v) for k,v in blob.items()]
if len(set(lens)) != 1:
#the lengths differ, we can't process
return False
for k,v in blob.items():
if k=="__time":
continue
if k not in self.store:
self.create(k)
self.store[k].insert(values=v,times=blob["__time"])
def merge(self,name,values,times):
"""
merge the additional into the origin see merge function to
"""
if name not in self.store:
return False
ts = TimeSeries(values = values, times= times)
return self.store[name].merge(ts)
def save(self,name):
saveDict = {}
for k,v in self.store.items():
ts = v.get()
saveDict[k] = ts["values"]
saveDict[k+"__time"] = ts["__time"]
numpy.savez(name, **saveDict)
def load(self,name):
        get = numpy.load(name+".npz")
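        # Hedged completion (added): the original method is truncated here. Assuming
        # the key layout written by save() above (<name> for values, <name>__time for
        # times), the remainder would plausibly rebuild each series; this is an
        # assumption, not the original code.
        for key in get.files:
            if key.endswith("__time"):
                continue
            self.create(key)
            self.store[key].set(values=get[key], times=get[key + "__time"])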
import numpy as np
from typing import Optional, Union, Tuple, Callable, List
from ..initialisations import lookup_normal_init
from ....utils.misc import to_device
from .abs_block import AbsBlock
from torch import Tensor
import torch.nn as nn
__all__ = ['ClassRegMulti']
class AbsTail(AbsBlock):
def __init__(self, n_in:int, n_out:int, objective:str, bias_init:Optional[float]=None,
lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init, freeze:bool=False):
super().__init__(lookup_init=lookup_init, freeze=freeze)
self.n_in,self.n_out,self.objective,self.bias_init = n_in,n_out,objective,bias_init
class ClassRegMulti(AbsTail):
r'''
Output block for (multi(class/label)) classification or regression tasks.
Designed to be passed as a 'tail' to :class:`~lumin.nn.models.model_builder.ModelBuilder`.
Takes output size of network body and scales it to required number of outputs.
For regression tasks, y_range can be set with per-output minima and maxima. The outputs are then adjusted according to ((y_max-y_min)*x)+self.y_min, where x
is the output of the network passed through a sigmoid function. Effectively allowing regression to be performed without normalising and standardising the
target values. Note it is safest to allow some leaway in setting the min and max, e.g. max = 1.2*max, min = 0.8*min
Output activation function is automatically set according to objective and y_range.
Arguments:
n_in: number of inputs to expect
n_out: number of outputs required
objective: string representation of network objective, i.e. 'classification', 'regression', 'multiclass'
y_range: if not None, will apply rescaling to network outputs: x = ((y_range[1]-y_range[0])*sigmoid(x))+y_range[0].
Incompatible with `y_mean` and `y_std`
bias_init: specify an intial bias for the output neurons. Otherwise default values of 0 are used, except for multiclass objectives, which use 1/n_out
y_mean: if sepcified along with `y_std`, will apply rescaling to network outputs: x = (y_std*x)+y_mean.
Incopmpatible with `y_range`
y_std: if sepcified along with `y_mean`, will apply rescaling to network outputs: x = (y_std*x)+y_mean.
Incopmpatible with `y_range`
lookup_init: function taking string representation of activation function, number of inputs, and number of outputs an returning a function to initialise
layer weights.
Examples::
>>> tail = ClassRegMulti(n_in=100, n_out=1, objective='classification')
>>>
>>> tail = ClassRegMulti(n_in=100, n_out=5, objective='multiclass')
>>>
>>> y_range = (0.8*targets.min(), 1.2*targets.max())
>>> tail = ClassRegMulti(n_in=100, n_out=1, objective='regression',
... y_range=y_range)
>>>
>>> min_targs = np.min(targets, axis=0).reshape(targets.shape[1],1)
>>> max_targs = np.max(targets, axis=0).reshape(targets.shape[1],1)
>>> min_targs[min_targs > 0] *=0.8
>>> min_targs[min_targs < 0] *=1.2
>>> max_targs[max_targs > 0] *=1.2
>>> max_targs[max_targs < 0] *=0.8
>>> y_range = np.hstack((min_targs, max_targs))
>>> tail = ClassRegMulti(n_in=100, n_out=6, objective='regression',
... y_range=y_range,
... lookup_init=lookup_uniform_init)
'''
# TODO: Automate y_range calculation with adjustable leeway
def __init__(self, n_in:int, n_out:int, objective:str, y_range:Optional[Union[Tuple,np.ndarray]]=None, bias_init:Optional[float]=None,
y_mean:Optional[Union[float,List[float],np.ndarray]]=None, y_std:Optional[Union[float,List[float],np.ndarray]]=None,
lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init, freeze:bool=False):
super().__init__(n_in=n_in, n_out=n_out, objective=objective, bias_init=bias_init, lookup_init=lookup_init, freeze=freeze)
self.y_range,self.y_mean,self.y_std = y_range,y_mean,y_std
self.rescale = False
if self.y_range is not None and (self.y_mean is not None or self.y_std is not None):
raise ValueError("Both y_range (sigmoid output and rescaling) and y_mean + y_std (linear output and rescaling) are set. Please only set either.")
if (self.y_mean is None and self.y_std is not None) or (self.y_mean is not None and self.y_std is None):
raise ValueError("Only one of y_mean or y_std is set, but not both. Please set both or neither.")
if self.y_mean is not None and self.y_std is not None and bias_init is not None:
print("y_mean and y_std are both set, but so is bias_init. Bias init will be set to zero to provide accurate rescaling")
self.bias_init = None
if self.y_range is not None:
        if not isinstance(self.y_range, np.ndarray): self.y_range = np.array(self.y_range)
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)
@app.route('/')
def login():
return render_template('option.html')
# Get the submitted form parameters and process them
@app.route('/sub', methods=['POST'])
def getLoginRequest():
q1 = request.values.get("q1")
q2 = request.values.get("q2")
q3 = request.values.get("q3")
q4 = request.values.get("q4")
q5 = request.values.get("q5")
q6 = request.values.get("q6")
q7 = request.values.get("q7")
q8 = request.values.getlist("q8")
q9 = request.values.get("q9")
q10 = request.values.get("q10")
q11 = request.values.get("q11")
q12 = request.values.get("q12")
q13 = request.values.getlist("q13")
q14 = request.values.getlist("q14")
# if q1!=None and q2!=None and q3!=None and q4!=None and q5!=None and q6!=None and q7!=None and q8!=[] and q9!=[]and q10!=[]:
## Calculate the result and transform it into an integer
sum = int(q1) + int(q2) + int(q3) + int(q4) + int(q5) + int(q6) + int(q7) + int(q9) + int(q10) + int(q11) + int(q12)
## Classify
num=12
if sum in range(12, 17):
level = 1
elif sum in range(17, 22):
level = 2
elif sum in range(22, 27):
level = 3
elif sum in range(27, 31):
level = 4
elif sum in range(31, 35):
level = 5
elif sum in range(35, 39):
level = 6
elif sum in range(39, 43):
level = 7
elif sum in range(43, 45):
level = 8
elif sum in range(45, 46):
level = 9
elif sum in range(46, 48):
level = 10
else:
level = 1
from rpy2 import robjects
robjects.r('''
library("xml2")
library("dplyr")
library("stringr")
library("rlist")
library("xts")
library("quantmod")
library("PortfolioAnalytics")
library("DEoptim")
library("ROI")
require("ROI.plugin.glpk")
require("ROI.plugin.quadprog")
weights<-function(ins) {
### Read data from last milestone
data = read.csv("complete.csv",header = TRUE, fileEncoding = "UTF-8", sep=",", na.strings = "")
data_print_out = data
#Number of funds in the final portforlio, can be specified
portfolio_num = 5
fund_num = portfolio_num*10
fund_num_tmp = floor(fund_num/10)
fund_num_upper = fund_num_tmp * ins
if(ins == 10)
{
fund_num_upper = fund_num
}
selected <- data[,(fund_num_tmp*(ins-1)+1+1):(fund_num_upper+1)] #first plus one: begin with 6-10; second plus one: first column of data is date
selected <- cbind(data[,1],selected)
data <- selected
data_print_out = data
### Construct the system by Markowitz model
#Initialization
rows = nrow(data)
returns_tmp = c()
return_tmp = data.frame()
for (j in 1:portfolio_num){
data_tmp <- data.frame(data[,1],data[,j+1])
data_tmp <- xts(data_tmp[,2],as.Date(data_tmp[,1]))
return_tmp <- periodReturn(data_tmp,period="daily",type="log")
returns_tmp <- cbind(returns_tmp,return_tmp)
}
returns <- returns_tmp[,1:5]
rownames(returns) <- data[,1]
colnames(returns) <- c("fd1", "fd2", "fd3", "fd4","fd5")
funds <- colnames(returns)
init <- portfolio.spec(assets = funds)
init <- add.constraint(portfolio=init, type="leverage",min_sum=1.00, max_sum=1.00)
init <- add.constraint(portfolio=init, type="box", min=0.10, max=0.40)
#6.3 Minimize variance with ROI
minvar <- add.objective(portfolio=init, type="risk", name="var")
opt_minvar <- optimize.portfolio(R=returns, portfolio=minvar, optimize_method="ROI", trace=TRUE)
w = extractWeights(opt_minvar)
data_print_out_final = rbind(data_print_out,w)
return_data = data_print_out_final[,-1]
return_data = rbind(colnames(return_data),return_data)
return (return_data)
}
''')
import pandas as pd
import numpy as np
return_data = robjects.r['weights'](level)
return_data=list(return_data)
length=len(return_data[0])
## Change data type
data=np.array(return_data[:])
data=data[:,1:(length+100)].astype(float)
data=data.transpose()
## Construct an empty array
data_all = np.zeros((10, data.shape[0] - 1)) # here minus 1: space for weights
## A function to calculate the average
def weighted_avg(in_put, weight):
if np.ndim(in_put) == 1:
return np.sum(in_put * weight)
else:
size = in_put.shape[0]
tmp = np.zeros(size)
for i in range(0, size):
tmp[i] = np.sum(in_put[i, :] * weight)
return tmp
###initialization
nav = data[:-1, :] # here -1: space for weights
weight = data[-1, :]
date = nav.shape[0]
fund_num = nav.shape[1]
data_all = np.zeros((10, date)) # here minus 1: space for weights
###daily returns
daily_returns = np.zeros((date - 1, fund_num))
for j in range(0, fund_num):
daily_returns[:, j] = np.diff(nav[:, j]) / nav[:-1, j]
data_all[9, :-1] = weighted_avg(daily_returns, weight)
###---Step 1
###accumulative daily returns
accu_daily_returns = np.zeros((date - 1, fund_num))
for j in range(0, fund_num):
for i in range(0, date - 1):
accu_daily_returns[i, j] = float(nav[i + 1, j] / nav[0, j] - 1)
data_all[1, :-1] = weighted_avg(accu_daily_returns, weight)
###---Step 2
###annualized return & std
annual_return = ((1 + (data_all[1, -2])) ** (1 / (
date / 252)) - 1) # data_all[1,-2] is the second last number of the data_all row, which is the acumulative rate
data_all[2, 0] = annual_return
annual_std = np.std(data_all[9, :-1]) * np.sqrt(252)
data_all[2, 3] = annual_std
## Sharpe Ratio
SR = data_all[9].mean() / data_all[9].std()
data_all[2, 1]=SR
data_all[6, 0] = SR
    ## Maximum drawdown
    index_j = np.argmax(np.maximum.accumulate(data_all[9]) - data_all[9])  # end position (trough)
    index_i = np.argmax(data_all[9][:index_j])  # start position (peak)
    d = data_all[9][index_j] - data_all[9][index_i]  # maximum drawdown
data_all[2, 2] = d
## 95% VAR
d_sort=data_all[9,:-1].copy()
d_sort.sort()
va=d_sort[int((date-1)*0.05)]
data_all[2, 4] = va
t2=[]
t2.append(data_all[2, 0])
t2.append(data_all[2, 1])
t2.append(data_all[2, 2])
t2.append(data_all[2, 3])
t2.append(data_all[2, 4])
###---Step 3: Chosen date return
    chosen_return = np.zeros((48, fund_num))
import random
import albumentations as A
import cv2
import numpy as np
import torch
__all__ = ["BoxesDropout", "CoarseDropoutWithBboxes"]
class BoxesDropout(A.DualTransform):
"""
Remove objects and fill image & mask corresponding to removed bboxes.
"""
def __init__(
self,
max_objects=5,
max_fraction=0.2,
image_fill_value=0,
mask_fill_value=0,
always_apply=False,
drop_overlapping_boxes=True,
overlap_iou=0.35,
p=0.5,
):
"""
Args:
max_objects: Maximum number of objects (bboxes) that can be dropped. Can be tuple, in this case it's [min, max]
max_fraction: Maximum fraction of the bboxes that can be dropped.
image_fill_value: Fill value to use when filling image.
Can be 'inpaint' to apply inpainting (works only for 3-channel images)
mask_fill_value: Fill value to use when filling mask.
Targets:
image, mask
Image types:
uint8, float32
"""
super(BoxesDropout, self).__init__(always_apply, p)
self.max_objects = max_objects
self.max_fraction = max_fraction
self.image_fill_value = image_fill_value
self.mask_fill_value = mask_fill_value
self.drop_overlapping_boxes = drop_overlapping_boxes
self.overlap_iou = overlap_iou
@property
def targets_as_params(self):
return ["image", "bboxes"]
@property
def targets(self):
return {
"image": self.apply,
"mask": self.apply_to_mask,
"masks": self.apply_to_masks,
"bboxes": self.apply_to_bboxes,
"keypoints": self.apply_to_keypoints,
}
def get_params_dependent_on_targets(self, params):
from torchvision.ops import box_iou
image = params["image"]
rows, cols = image.shape[:2]
bboxes = A.denormalize_bboxes(params["bboxes"], rows, cols)
num_bboxes = len(bboxes)
max_num_objects_to_drop = min(self.max_objects, int(self.max_fraction * num_bboxes))
if max_num_objects_to_drop == 0:
dropout_mask = None
objects_to_drop = []
else:
indexes = np.arange(num_bboxes)
objects_to_drop = random.randint(1, max_num_objects_to_drop)
objects_to_drop = set(random.sample(indexes.tolist(), objects_to_drop))
if self.drop_overlapping_boxes:
bboxes = np.asarray([box[:4] for box in bboxes]).reshape((-1, 4))
iou: np.ndarray = box_iou(torch.from_numpy(bboxes), torch.from_numpy(bboxes)).numpy()
np.fill_diagonal(iou, 0)
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 16:26:04 2018
@author: garrettsmith
Word-by-word SOSP sentence processing
The lexicon is a dictionary where the keys are attachment sites (head and
dependent) and the values are lists of the features.
A treelet is a vector of head identities (phon. forms), head features, and
a fixed number of dependent features for each word.
DESIGN CHOICE: To allow for strings shorter than max_sent_length, added EMPTY
lexical item as placeholder. Short but fully linked parses, e.g., the->dog
EMPTY, are fully harmonious.
CHANGE TO PREV. CHOICE (5/24): Only parses with all required attachments are
now fully harmonious. This is done by implementing the at-most-one-hot rule
as a local constraint for each attch. site.
DESIGN CHOICE: A seq. of all EMPTYs has a local harmony of missing_link_penalty
DESIGN CHOICE: Ambiguous words are disambiguated in the lexicon file, but if
they share a phonological form, only a single phonological form is used for
making the dimension names. This is also how optional attachments can be
handled (although I haven't tried that yet).
DESIGN CHOICE: eliminating link patterns like L_W0_W1_d0 L_W1_W0_d0, i.e.,
"circular" link patterns.
DESIGN CHOICE: init. activ. patterns of ambiguous words are the average of
their ambiguous senses.
DESIGN CHOICE: a seq. of all EMPTYs is only penalized for its missing links
DESIGN CHOICE: When a new word is input, predictions/hallucinations about not-
yet-seen words are erased so that the system is always deflected away from an
attr. instead of sitting immediately at a low-harmony attr. such as w0 w1 EMPTY (no links).
DESIGN CHOICE: Include a full lexicon, but if only want to consider particular
sequences, simply pass a corpus of those sequences.
DESIGN CHOICE: Using pullback: when a new word is input, the link strengths are
multiplied by a parameter self.pullback that weakens them (or turns them off).
Later maybe: Info about the expected direction of dependents would reduce the
number of dim. Also, after calculating harmonies, could eliminate very
low-harmony centers to simplify system.
For now at least, don't use root/apex node
"""
import yaml
from itertools import product
from sympy.utilities.iterables import multiset_permutations
import numpy as np
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import seaborn as sns
from .dynamics import calc_harmony, iterate, euclid_stop, vel_stop, cheb_stop
import pandas as pd
def gen_nlinks_vectors(nlink_dims, maxlinks):
lconfigs = []
for i in range(0, maxlinks+1):
base = [0]*nlink_dims
if i > 0:
base[:i] = [1]*i
perms = multiset_permutations(base)
lconfigs.extend([i for i in perms])
return lconfigs # chain(*lconfigs)
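# For example, gen_nlinks_vectors(2, 1) -> [[0, 0], [0, 1], [1, 0]]:
# every 0/1 vector of length nlink_dims with at most maxlinks ones.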
class Struct(object):
def __init__(self, lex_file=None, features=None, max_sent_length=10,
missing_link_cost=0.01, gamma=0.25,
stopping_crit='euclid_stop', corpus=None):
self.max_sent_length = max_sent_length
self.ndim_per_position = 0
# Maximum number of possible dependents; change to be fn. that calc.s
# after reading in lex.
self.ndep = 2
self.max_links = self.max_sent_length - 1
# Multiplier for missing links
self.missing_link_cost = missing_link_cost
self.gamma = gamma
if stopping_crit == 'vel_stop':
self.stopping_crit = vel_stop
elif stopping_crit == 'cheb_stop':
self.stopping_crit = cheb_stop
else:
self.stopping_crit = euclid_stop
# self.tau = 0.01 # Time step for discretized dynamics
self.tau = 0.1
self.max_time = 10000 # Max. number of time steps
self.noise_mag = 0.0001 # default
self.tol = 0.05 # Stopping tolerance
self.pullback = 0.0
if features is None:
self.features = ['Det', 'N', 'V', 'sg', 'pl']
self.nfeatures = len(self.features)
else:
self.features = features
self.nfeatures = len(features)
if lex_file is not None:
self.lexicon = self._import_lexicon(lex_file)
pf = []
for w in self.lexicon:
pf.append(self.lexicon[w]['phon_form'])
self.phon_forms = list(dict.fromkeys(pf))
self.nwords = len(self.lexicon)
self.nphon_forms = len(self.phon_forms)
self.pos_names = self._name_pos_dims()
self.link_names = self._name_links()
self.nlinks = len(self.link_names)
self.dim_names = self.pos_names + self.link_names
self.ndim = len(self.dim_names)
self.idx_words = {j: i for i, j in enumerate(self.lexicon.keys())}
self.idx_phon_feat = slice(0, self.nphon_forms)
self.idx_phon_dict = {j: i for i, j in enumerate(self.phon_forms)}
self.idx_head_feat = slice(self.nphon_forms, self.nphon_forms
+ self.nfeatures)
self.idx_links = slice(len(self.pos_names), len(self.dim_names))
self.word_vecs = self._make_word_vecs()
else:
print('No lexicon loaded')
self.lexicon = dict()
self.nwords = 0
self.dim_names = None
self.ndim = None
# Working with a corpus
if corpus is not None:
disamb = corpus.copy()
for seq in corpus:
# Disambiguating words
for word_nr, word in enumerate(seq):
ambig_forms = [w for w in self.lexicon if word in w]
if len(ambig_forms) > 1:
for amb in ambig_forms:
rep = [w if w is not word else amb for w in seq]
disamb.append(rep)
# del disamb[corpus.index(seq)]
del disamb[disamb.index(seq)]
# Also need to add partial subsequences from seqs in corpus
full_corp = disamb.copy()
for seq in disamb:
for i in range(len(seq)-1):
full_corp.append(seq[:i+1] + ['EMPTY']*(len(seq)-i-1))
corp_tuple = map(tuple, full_corp)
corp_unique = list(map(list, dict.fromkeys(corp_tuple)))
# self.seq_names = full_corp
self.seq_names = corp_unique
def set_params(self, **kwargs):
for param, val in kwargs.items():
setattr(self, param, val)
def _import_lexicon(self, file):
with open(file, 'r') as stream:
lex = yaml.safe_load(stream)
assert 'EMPTY' in lex.keys(), 'Lexicon must include EMPTY.'
return lex
def _make_word_vecs(self):
"""Builds word vecs, return them in a NumPy array
"""
word_list = []
for word in self.lexicon:
curr = []
word_phon = self.lexicon[word]['phon_form']
phon = [0.] * self.nphon_forms
phon[self.idx_phon_dict[word_phon]] = 1.0
curr.extend([i for i in phon])
curr.extend(self.lexicon[word]['head'])
if self.lexicon[word]['dependents'] is None:
curr.extend([-1.] * self.ndep * self.nfeatures)
else:
for dep in self.lexicon[word]['dependents']:
curr.extend(self.lexicon[word]['dependents'][dep])
ndeps = len(self.lexicon[word]['dependents'])
if ndeps > 0:
# Code non-existent features as -1s as placeholders
curr.extend([-1.] * (self.ndep-ndeps) * self.nfeatures)
word_list.append(curr)
return np.array(word_list)
def _name_seqs(self):
"""Finds all word sequences up to max_sentence_lengths. The centers
will be these with allowed link combinations appended (done later).
"""
# One approach: for each possible sequence of words, find all allowed
# feature/link combinations.
if self.seq_names:
word_seqs = self.seq_names
else:
non_empty = {k: self.lexicon[k] for k in self.lexicon
if k not in 'EMPTY'}
# For storing all possible sequences of words
word_seqs = []
# Manually adding the empty sequence
word_seqs.append(['EMPTY'] * self.max_sent_length)
for i in range(self.max_sent_length):
pr = product(non_empty, repeat=i+1)
word_seqs.extend([list(x) for x in pr])
for i in range(len(word_seqs)):
curr_len = len(word_seqs[i])
if curr_len < self.max_sent_length:
word_seqs[i].extend(['EMPTY'] * (self.max_sent_length
- curr_len))
return word_seqs
def _make_seq_vecs(self):
"""Returns a list of sequence vectors in which each element holds word
vectors concatenated together.
"""
word_vec = self._make_word_vecs()
seq_names = self._name_seqs()
seq_vecs = []
for seq in seq_names:
curr_seq = []
for word in seq:
curr_word = self.idx_words[word]
curr_seq.extend(word_vec[curr_word])
seq_vecs.append(curr_seq)
self.seq_vecs = seq_vecs
return seq_vecs
def _prune_links(self):
"""Returns an array of link vectors after removing the ones disallowed
under the constraints of SOSP
"""
link_names = self._name_links()
nlink_dims = len(link_names)
link_vecs = gen_nlinks_vectors(nlink_dims, self.max_links)
# A little kludgy, but works for now...
if self.max_sent_length == 2:
return(link_vecs)
to_rm = []
to_keep = []
for i, lvec in enumerate(link_vecs):
# Remove vectors that have the same word attached twice as a dep.
for word_nr in range(self.max_sent_length):
dim_per_word = self.ndep * (self.max_sent_length-1)
init = word_nr*dim_per_word
idx = slice(init, init+dim_per_word)
if sum(lvec[idx]) >= self.max_links:
to_rm.append(i)
# to_keep.append(lvec)
# Next, rm vectors with more than one thing attached to the
# same dep attch site.
for dep in ['d0', 'd1']:
word_str = 'W' + str(word_nr) + '_' + dep
dep_idx = [j for j, w in enumerate(link_names)
if word_str in w]
if sum([lvec[k] for k in dep_idx]) >= self.max_links:
to_rm.append(i)
# Now rm links that form cycles
for wn in range(self.max_sent_length-1):
w0 = wn
w1 = wn + 1
for d in ['d' + str(j) for j in range(self.ndep)]:
s0 = '_'.join(['L', 'W' + str(w0), 'W' + str(w1), d])
idx0 = link_names.index(s0)
s1 = '_'.join(['L', 'W' + str(w1), 'W' + str(w0), d])
idx1 = link_names.index(s1)
if lvec[idx0] == 1 and lvec[idx1] == 1:
to_rm.append(i)
# Finally, remove links that aren't possible with the vocabulary
return [link_vecs[k] for k in range(len(link_vecs)) if k not in to_rm]
def _name_links(self):
print('Naming links...')
links = []
for pos_nr in range(self.max_sent_length):
other_positions = [x for x in range(self.max_sent_length)
if x != pos_nr]
# Any word can appear at any position, so use whole lexicon here
for op in other_positions:
for dep in ['d0', 'd1']: # first and second dependents
links.append('_'.join(['L', 'W' + str(pos_nr),
'W' + str(op), dep]))
return links
def _name_pos_dims(self):
"""Returns a list of the dimension names. There are always ndep
dependents at a position regardless of what word is in that position.
Also only creates one phonological form for ambiguous words, like
'the_sg' and 'the_pl.'
"""
assert self.lexicon is not None, 'Must initialize lexicon.'
print('Naming position dimensions...')
per_position = []
for word in self.phon_forms:
per_position.append(word)
for feat in self.features:
per_position.append(feat)
for dep in range(self.ndep):
for feat in self.features:
per_position.append('d' + str(dep) + '_' + feat)
self.ndim_per_position = len(per_position)
all_names = []
for i in range(self.max_sent_length):
tmp = ['W' + str(i) + '_' + pf for pf in per_position]
for x in tmp:
all_names.append(x)
return all_names
def gen_centers(self):
"""Will return a NumPy array with a center on each row.
Because links only care about sentence position and attch. site,
don't have to worry about what words are in the positions, except to
make sure they allow dependents.
Note: need to create 2 different centers when there's a 0.5 in the vec
"""
# Notes: link vec of zeros is always possible, no matter how many words
# have been input. No links turned on after reading first word.
# As words come in, can only allow centers with them attching somehow
# to previous words, not looking ahead.
seq_vecs = self._make_seq_vecs()
seq_names = self._name_seqs()
assert len(seq_vecs) == len(seq_names), \
'Number of sequence vectors mismatches number of sequence names.'
link_names = self._name_links()
link_vecs = self._prune_links()
centers = []
# Cycle through seqs and find allowed links
for seq_name, seq in zip(seq_names, seq_vecs):
curr_seq = seq.copy()
if seq_name[0] == 'EMPTY':
# Assumes 0th link vec is one with no links!
centers.append(curr_seq + link_vecs[0])
elif seq_name[1] == 'EMPTY':
centers.append(curr_seq + link_vecs[0])
else:
# Need to exclude attchs. to EMPTYs
try:
first_empty = seq_name.index('EMPTY')
empties = ['W' + str(i) for i in
range(first_empty, self.max_sent_length)]
# Indexing the dimensions that have links to EMPTYs
empty_idx = [i for i, ln in enumerate(link_names) for e in
empties if e in ln]
except ValueError:
empty_idx = []
to_rm = []
for lconfig in link_vecs:
for i in empty_idx:
if lconfig[i] != 0:
to_rm.append(lconfig)
# Now removing link configs if they link to a non-existent
# dependent
for word_nr, word in enumerate(seq_name):
if self.lexicon[word]['dependents'] is None:
null_attch = ['W' + str(word_nr) + '_' + 'd'
+ str(j) for j in range(self.ndep)]
null_idx = [i for i, ln in enumerate(link_names)
for n in null_attch if n in ln]
for lconfig in link_vecs:
for i in null_idx:
if lconfig[i] != 0:
to_rm.append(lconfig)
elif len(self.lexicon[word]['dependents']) < self.ndep:
null_attch = ['W' + str(word_nr) + '_' + 'd'
+ str(j) for j in
range(1, self.ndep)]
null_idx = [i for i, ln in enumerate(link_names)
for n in null_attch if n in ln]
for lconfig in link_vecs:
for i in null_idx:
if lconfig[i] != 0:
to_rm.append(lconfig)
# Now, removing links to/from EMPTYs
if word == 'EMPTY':
idx = [i for i, ln in enumerate(link_names)
if 'W' + str(word_nr) in ln]
for lconfig in link_vecs:
if any([lconfig[j] for j in idx]):
to_rm.append(lconfig)
# Finally, removing any link configs w/ multiple links to
# same attch. site
for lconfig in link_vecs:
mult_gov = [l for l in link_names if 'L_W' +
str(word_nr) in l]
idx = [i for i, ln in enumerate(self.link_names) if ln
in mult_gov]
if sum([lconfig[i] for i in idx]) >= 2:
to_rm.append(lconfig)
# Excluding to_rm
configs_to_use = [c for c in link_vecs if c not in to_rm]
for config in configs_to_use:
centers.append(curr_seq + config)
# Getting rid of duplicates
ctuple = map(tuple, centers)
centers_unique = list(dict.fromkeys(ctuple))
centers_array = np.array(centers_unique)
centers_array[centers_array < 0] = 0.0 # Getting rid of -1s
self.centers = centers_array
print('Number of centers generated: {}'.format(centers_array.shape[0]))
return
def which_nonzero(self, center):
"""Returns the names of the dimensions in a cetner that are non-zero.
"""
idx = list(np.where(center != 0)[0])
return [self.dim_names[i] for i in idx]
def look_up_center(self, active):
"""Returns the center (if it exists) that corresponds to the given
dimensions being active.
"""
idx = [self.dim_names.index(dim) for dim in active]
test_vec = [0] * len(self.dim_names)
for i in idx:
test_vec[i] = 1
if test_vec in self.centers.tolist():
lidx = np.where((self.centers == test_vec).all(axis=1))
lharmony = self.local_harmonies[lidx]
print('Local harmony: {}\nCenter number: {}\nCenter: {}'.format(
lharmony, lidx, test_vec))
else:
print('Active dimensions don\'t correspond to a center.')
return
def hamming_dist(self, vec0, vec1):
return sum(f0 != f1 for f0, f1 in zip(vec0, vec1))
def feat_match(self, vec0, vec1):
assert len(vec0) == len(vec1), 'Feature vectors not of equal length'
return 1 - (self.hamming_dist(vec0, vec1) / len(vec0))
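# e.g. feat_match([1, 0, 1, 0], [1, 0, 0, 0]) == 0.75: one of the four
# features mismatches, so the match proportion is 1 - 1/4.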
def calculate_local_harmonies(self):
"""Cycle through the centers and use self.lexicon to look up features.
"""
local_harmonies = np.ones(self.centers.shape[0])
for c, center in enumerate(self.centers):
# Find which dims are active
nonzero = self.which_nonzero(self.centers[c])
# Getting active links from there
active_links = [nonzero[i] for i, dim in enumerate(nonzero)
if 'L_' in dim]
nempties = len([dim for dim in nonzero if 'EMPTY' in dim])
if nempties == self.max_sent_length:
# This is a choice:
local_harmonies[c] *= self.missing_link_cost**self.max_links
# pass
continue
for link in active_links:
# get locations of feat vecs
_, dep_word_nr, head_word_nr, head_dep = link.split('_')
# Just the position number
dep_nr = int(dep_word_nr[1])
dep_slice = slice(dep_nr * self.ndim_per_position
+ self.nphon_forms,
dep_nr * self.ndim_per_position
+ self.nphon_forms + self.nfeatures)
# Head features of the dependent treelet
v0 = center[dep_slice]
# Getting features of dependent attch. site on the head
head_str = '_'.join([head_word_nr, head_dep])
tmp = [i for i, x in enumerate(self.pos_names) if head_str
in x]
head_slice = slice(tmp[0], tmp[0] + self.nfeatures)
v1 = center[head_slice]
local_harmonies[c] *= self.feat_match(v0, v1)
# Penalizing missing links
active_words = [nonzero[i] for i, dim in enumerate(nonzero)
for ph in self.phon_forms if ph in dim]
spl = [item.split('_') for item in active_words]
assert max([len(it) for it in spl]) == 2, 'Error identifying words'
for pos, word in spl:
if word == 'EMPTY':
continue
ambig = [ph for ph in self.lexicon if word in ph]
if len(ambig) > 1:
for form in ambig:
head_feats = [self.features[i] for i, val in
enumerate(self.lexicon[form]['head'])
if val == 1]
hfeat_pos = [pos + '_' + x for x in head_feats]
# If this form isn't the one in the center, skip it
if not all(x in nonzero for x in hfeat_pos):
continue
if (self.lexicon[form]['gov_req'] and not
any(x for x in active_links if 'L_' + pos + '_W' in x)):
local_harmonies[c] *= self.missing_link_cost
if self.lexicon[form]['dependents'] is not None:
for dep_nr in range(len(self.lexicon[form]['dependents'])):
if (self.lexicon[form]['dep_req'][dep_nr] and
not any(x for x in active_links if pos +
'_d' + str(dep_nr) in x)):
local_harmonies[c] *= self.missing_link_cost
else:
if (self.lexicon[word]['gov_req'] and
not any(x for x in active_links if 'L_' + pos + '_W' in x)):
local_harmonies[c] *= self.missing_link_cost
if self.lexicon[word]['dependents'] is not None:
for dep_nr in range(len(self.lexicon[word]['dependents'])):
if (self.lexicon[word]['dep_req'][dep_nr] and not
any(x for x in active_links if pos + '_d' + str(dep_nr) in x)):
local_harmonies[c] *= self.missing_link_cost
# Old way: across-the-board, top-down penalty for too few links
# if len(active_links) < self.max_links - nempties:
# local_harmonies[c] *= (self.missing_link_cost **
# (self.max_links -
# len(active_links)))
self.local_harmonies = local_harmonies
return
def input_word(self, state_vec, word, pos):
"""Inputs a new word at a particular position by overwriting the values
of the state vector at the relevant positions.
"""
assert (pos + 1) <= self.max_sent_length, \
'Can only add words up to max_sent_length'
# First, get the feature vector(s) from the lexicon
ambig_words = [w for w in self.lexicon if word in w]
# Then, average them in case the word is ambiguous
word_vec = np.zeros(self.nfeatures)
for w in ambig_words:
word_vec += np.array(self.lexicon[w]['head'])
word_vec /= len(ambig_words)
# Getting dep. features
dep_feats = np.zeros(self.ndep * self.nfeatures)
for i, w in enumerate(ambig_words):
if self.lexicon[w]['dependents'] is not None:
idx = slice(i*self.nfeatures, i*self.nfeatures+self.nfeatures)
for d in self.lexicon[w]['dependents']:
# Getting avg. of deps in case the word has multiple senses
dep_feats[idx] += np.array(self.lexicon[w]['dependents'][d])
dep_feats /= len(ambig_words)
# Finally, turn on the averaged features at the correct position
phon = np.zeros(self.nphon_forms)
phon[self.idx_phon_dict[word]] = 1.0
whole_vec = np.zeros(self.ndim_per_position * (self.max_sent_length
- pos))
whole_vec[:self.nphon_forms] = phon
whole_vec[self.nphon_forms:self.nphon_forms+self.nfeatures] = word_vec
whole_vec[self.nphon_forms+self.nfeatures:
self.nphon_forms+self.nfeatures+self.ndep*self.nfeatures] \
= dep_feats
updated_state = state_vec.copy()
start = pos*self.ndim_per_position
stop = self.ndim - self.nlinks
idx = slice(start, stop)
updated_state[idx] = whole_vec
updated_state[-self.nlinks:] *= self.pullback # Implementing pull-back
return updated_state
def neg_harmony(self, x, centers, local_harmonies, gamma):
return -1 * calc_harmony(x, centers, local_harmonies, gamma)
def jac_neg_harmony(self, x, centers, local_harmonies, gamma):
return -1 * iterate(x, centers, local_harmonies, gamma)
def locate_attrs(self):
"""Finds actual locations of attractors in the full harmony landscape
using the L-BFGS-B algorithm on the negative of the harmony fn.
"""
attrs = np.zeros(self.centers.shape)
for c in range(self.centers.shape[0]):
extremum = minimize(self.neg_harmony, self.centers[c],
args=(self.centers, self.local_harmonies,
self.gamma), method='L-BFGS-B',
jac=self.jac_neg_harmony)
attrs[c] = extremum.x
unique_attrs = np.unique(np.round(attrs, 2), axis=0)
self.attrs = unique_attrs
print('Found {} unique attractors from {} centers'.format(
self.attrs.shape[0], self.centers.shape[0]))
return
def _zero_state_hist(self):
self.state_hist = np.zeros((self.max_time, self.ndim))
def single_run(self, seq=None):
"""Run the model once until stopping criterion is met or
time runs out.
"""
assert seq is not None, 'Must provide a sequence of words.'
self._zero_state_hist()
self.harmony = np.zeros(self.max_time)
data = []
# Input the first word
curr_pos = 0
self.state_hist[0, ] = self.input_word(self.state_hist[0, ],
seq[curr_pos], curr_pos)
# Pre-generate the noise for speed
noise = (np.sqrt(2 * self.noise_mag * self.tau)
* np.random.normal(0, 1, self.state_hist.shape))
t = 0
word_t = 0 # for keeping track of max amt. of time ea. word can get
while t < self.max_time-1:
not_close = self.stopping_crit(self.state_hist[t], self.attrs,
self.tol)
if not_close:
self.state_hist[t+1, ] = (self.state_hist[t, ]
+ self.tau *
iterate(self.state_hist[t, ],
self.centers,
self.local_harmonies,
self.gamma)
+ noise[t, ])
self.harmony[t] = calc_harmony(self.state_hist[t, ],
self.centers,
self.local_harmonies,
self.gamma)
t += 1
word_t += 1
else:
data.append([curr_pos, seq[curr_pos], word_t])
try:
curr_pos += 1
self.state_hist[t+1, ] = (self.input_word(
self.state_hist[t, ],
seq[curr_pos], curr_pos))
self.harmony[t] = calc_harmony(self.state_hist[t, ],
self.centers,
self.local_harmonies,
self.gamma)
t += 1
word_t = 0
except IndexError:  # no more input words left in seq
trunc = self.state_hist[~np.all(self.state_hist == 0,
axis=1)]
return trunc[-1], data
trunc = self.state_hist[~np.all(self.state_hist == 0, axis=1)]
return trunc[-1], data
def many_runs(self, n_runs=100, seq=None):
"""Do repeated Monte Carlo runs. Returns a Pandas data frame with the
center number and settling time.
"""
print('Run number:')
data_list = []
for run in range(n_runs):
curr_data = []
if run % (n_runs // 10) == 0:
print('[{}] '.format(run), end='')
final_st, trial_data = self.single_run(seq)
for w in trial_data:
curr_data.append(w)
final_rounded = np.rint(final_st)
import os
import numpy as np
import pytest
import xarray as xr
from xclim import atmos
from xclim.core.calendar import percentile_doy
from xclim.core.options import set_options
from xclim.core.units import convert_units_to
from xclim.testing import open_dataset
K2C = 273.15
class TestCSDI:
def test_simple(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn += K2C
tn[10:20] -= 2
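# depress 10 consecutive days so they fall below the daily 10th percentile -> CSDI of 10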
tn = tasmin_series(tn)
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert out[0] == 10
def test_convert_units(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn[10:20] -= 2
tn = tasmin_series(tn + K2C)
tn.attrs["units"] = "C"
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert out[0] == 10
def test_nan_presence(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ K2C
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn[10:20] -= 2
tn[9] = np.nan
tn = tasmin_series(tn)
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert np.isnan(out[0])
class TestDTR:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_DTR_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
dtr = atmos.daily_temperature_range(tasmin, tasmax, freq="MS")
dtrC = atmos.daily_temperature_range(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
dtr1 = max1 - min1
np.testing.assert_array_equal(dtr, dtrC)
assert dtr.attrs["units"] == "K"
assert np.allclose(dtr1[0:31].mean(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
dtr = atmos.max_daily_temperature_range(tasmin, tasmax, freq="MS")
dtrC = atmos.max_daily_temperature_range(tasmin_C, tasmax_C, freq="MS")
np.testing.assert_array_equal(dtr, dtrC)
assert dtr.attrs["units"] == "K"
assert np.allclose(dtr1[0:31].max(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
class TestDTRVar:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_dtr_var_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
dtr = atmos.daily_temperature_range_variability(tasmin, tasmax, freq="MS")
dtrC = atmos.daily_temperature_range_variability(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
assert dtr.attrs["units"] == "K"
dtr1a = max1 - min1
dtr1 = abs(np.diff(dtr1a))
np.testing.assert_array_equal(dtr, dtrC)
# first month jan use 0:30 (n==30) because of day to day diff
assert np.allclose(dtr1[0:30].mean(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
class TestETR:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_dtr_var_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
etr = atmos.extreme_temperature_range(tasmin, tasmax, freq="MS")
etrC = atmos.extreme_temperature_range(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
np.testing.assert_array_equal(etr, etrC)
etr1 = max1[0:31].max() - min1[0:31].min()
assert np.allclose(etr1, etr.values[0, 0, 0])
assert np.isnan(etr.values[1, 1, 0])
assert np.isnan(etr.values[0, -1, -1])
class TestTmean:
nc_files = (
os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc"),
os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc"),
)
def test_Tmean_3d_data(self):
ds_tmax = open_dataset(self.nc_files[0])
ds_tmin = open_dataset(self.nc_files[1])
tas = atmos.tg(ds_tmin.tasmin, ds_tmax.tasmax)
tas_C = atmos.tg(ds_tmin.tasmin, ds_tmax.tasmax)
tas_C.values -= K2C
tas_C.attrs["units"] = "C"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tas_C.values[180, 1, 0] = np.nan
tmmean = atmos.tg_mean(tas)
tmmeanC = atmos.tg_mean(tas_C)
x1 = tas.values[:, 0, 0]
tmmean1 = x1.mean()
# TODO: Investigate the differences between the two outputs.
# The conversion to K is done after / before the mean.
np.testing.assert_array_almost_equal(tmmeanC, tmmean, 3)
# test single point vs manual
assert np.allclose(tmmean1, tmmean.values[0, 0, 0], tmmeanC.values[0, 0, 0])
# test single nan point
assert np.isnan(tmmean.values[0, 1, 0])
# test all nan point
assert np.isnan(tmmean.values[0, -1, -1])
class TestTx:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_TX_3d_data(self):
tasmax = open_dataset(self.nc_file).tasmax
tasmax_C = open_dataset(self.nc_file).tasmax
tasmax_C.values -= K2C
tasmax_C.attrs["units"] = "C"
# put a nan somewhere
tasmax.values[180, 1, 0] = np.nan
tasmax_C.values[180, 1, 0] = np.nan
txmean = atmos.tx_mean(tasmax)
txmax = atmos.tx_max(tasmax)
txmin = atmos.tx_min(tasmax)
txmeanC = atmos.tx_mean(tasmax_C)
txmaxC = atmos.tx_max(tasmax_C)
txminC = atmos.tx_min(tasmax_C)
no_nan = (
~np.isnan(txmean).values & ~np.isnan(txmax).values & ~np.isnan(txmin).values
)
# test maxes always greater than mean and mean always greater than min (non nan values only)
assert np.all(txmax.values[no_nan] > txmean.values[no_nan]) & np.all(
txmean.values[no_nan] > txmin.values[no_nan]
)
np.testing.assert_array_almost_equal(txmeanC, txmean, 3)
np.testing.assert_array_equal(txminC, txmin)
np.testing.assert_array_equal(txmaxC, txmax)
x1 = tasmax.values[:, 0, 0]
txmean1 = x1.mean()
txmin1 = x1.min()
txmax1 = x1.max()
# test single point vs manual
assert np.allclose(txmean1, txmean.values[0, 0, 0], txmeanC.values[0, 0, 0])
assert np.allclose(txmax1, txmax.values[0, 0, 0], txmaxC.values[0, 0, 0])
assert np.allclose(txmin1, txmin.values[0, 0, 0], txminC.values[0, 0, 0])
# test single nan point
assert np.isnan(txmean.values[0, 1, 0])
assert np.isnan(txmin.values[0, 1, 0])
assert np.isnan(txmax.values[0, 1, 0])
# test all nan point
assert np.isnan(txmean.values[0, -1, -1])
assert np.isnan(txmin.values[0, -1, -1])
assert np.isnan(txmax.values[0, -1, -1])
class TestTn:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_TN_3d_data(self):
tasmin = open_dataset(self.nc_file).tasmin
tasmin_C = open_dataset(self.nc_file).tasmin
tasmin_C.values -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[180, 1, 0] = np.nan
tasmin_C.values[180, 1, 0] = np.nan
tnmean = atmos.tn_mean(tasmin)
tnmax = atmos.tn_max(tasmin)
tnmin = atmos.tn_min(tasmin)
tnmeanC = atmos.tn_mean(tasmin_C)
tnmaxC = atmos.tn_max(tasmin_C)
tnminC = atmos.tn_min(tasmin_C)
no_nan = (
~np.isnan(tnmean).values & ~np.isnan(tnmax).values & ~np.isnan(tnmin).values
)
# test maxes always greater than mean and mean always greater than min (non nan values only)
assert np.all(tnmax.values[no_nan] > tnmean.values[no_nan]) & np.all(
tnmean.values[no_nan] > tnmin.values[no_nan]
)
np.testing.assert_array_almost_equal(tnmeanC, tnmean, 3)
np.testing.assert_array_equal(tnminC, tnmin)
np.testing.assert_array_equal(tnmaxC, tnmax)
x1 = tasmin.values[:, 0, 0]
txmean1 = x1.mean()
txmin1 = x1.min()
txmax1 = x1.max()
# test single point vs manual
assert np.allclose(txmean1, tnmean.values[0, 0, 0], tnmeanC.values[0, 0, 0])
assert np.allclose(txmax1, tnmax.values[0, 0, 0], tnmaxC.values[0, 0, 0])
assert np.allclose(txmin1, tnmin.values[0, 0, 0], tnminC.values[0, 0, 0])
# test single nan point
assert np.isnan(tnmean.values[0, 1, 0])
assert np.isnan(tnmin.values[0, 1, 0])
assert np.isnan(tnmax.values[0, 1, 0])
# test all nan point
assert np.isnan(tnmean.values[0, -1, -1])
assert np.isnan(tnmin.values[0, -1, -1])
assert np.isnan(tnmax.values[0, -1, -1])
class TestConsecutiveFrostDays:
def test_one_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [1])
def test_three_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [3])
def test_two_equal_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
a[6:9] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [3])
def test_two_events_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
a[6:10] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [4])
def test_convert_units_freeze_day(self, tasmin_series):
a = np.zeros(365) + 5.0
a[2:5] -= 20
a[6:10] -= 20
ts = tasmin_series(a)
ts.attrs["units"] = "C"
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [4])
def test_one_nan_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2] -= 20
a[-1] = np.nan
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [np.nan])
class TestConsecutiveFrostFreeDays:
def test_real_data(self, atmosds):
tasmin = atmosds.tasmin
test = atmos.maximum_consecutive_frost_free_days(tasmin)
np.testing.assert_allclose(test[2, 0], [68], rtol=1e-1)
assert (
"Annual maximum number of consecutive days with minimum daily temperature above or equal to 0 degc."
) in test.description
class TestFrostSeasonLength:
def test_simple(self, tasmin_series):
a = np.zeros(730) + K2C + 15
a[300:400] = K2C - 5
a[404:407] = K2C - 5
tasmin = tasmin_series(a, start="2000-01-01")
# Default, window = 5, mid_date = 07-01, freq= AS-JUL
out = atmos.frost_season_length(tasmin=tasmin)
np.testing.assert_array_equal(out, [np.nan, 107, np.nan])
out = atmos.frost_season_length(tasmin=tasmin, window=3)
np.testing.assert_array_equal(out, [np.nan, 100, np.nan])
out = atmos.frost_season_length(tasmin=tasmin, mid_date="07-01", freq="YS")
np.testing.assert_array_equal(out, [np.nan, np.nan])
class TestColdSpellDays:
def test_simple(self, tas_series):
a = np.zeros(365) + K2C
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
ts = tas_series(a)
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0])
out = atmos.cold_spell_frequency(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_convert_units(self, tas_series):
a = np.zeros(365)
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
ts = tas_series(a)
ts.attrs["units"] = "C"
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0])
import inspect
import numpy as np
from primitives_ubc.regCCFS.src.utils.commonUtils import sVT
from primitives_ubc.regCCFS.src.utils.commonUtils import is_numeric
from primitives_ubc.regCCFS.src.utils.commonUtils import fastUnique
from primitives_ubc.regCCFS.src.utils.commonUtils import queryIfColumnsVary
from primitives_ubc.regCCFS.src.utils.commonUtils import queryIfOnlyTwoUniqueRows
from primitives_ubc.regCCFS.src.utils.ccfUtils import regCCA_alt
from primitives_ubc.regCCFS.src.utils.ccfUtils import random_feature_expansion
from primitives_ubc.regCCFS.src.utils.ccfUtils import genFeatureExpansionParameters
from primitives_ubc.regCCFS.src.training_utils.component_analysis import componentAnalysis
from primitives_ubc.regCCFS.src.training_utils.twopoint_max_marginsplit import twoPointMaxMarginSplit
import warnings
warnings.filterwarnings('ignore')
import logging
logger = logging.getLogger(__name__)
#-----------------------------------------------------------------------------#
def setupLeaf(YTrain, bReg, options):
"""
Update tree struct to make node a leaf
"""
tree = {}
tree["bLeaf"] = True
tree["Npoints"] = YTrain.shape[0]
tree["mean"] = np.mean(YTrain, axis=0)
if bReg:
tree["std_dev"] = np.std(YTrain, axis=0, ddof=1)
# If a mapping has been applied, invert it
if not (options["org_stdY"].size == 0):
tree["mean"] = tree["mean"] * options["org_stdY"]
tree["std_dev"] = tree["std_dev"] * options["org_stdY"]
if not (options["org_muY"].size == 0):
tree["mean"] = tree["mean"] + options["org_muY"]
return tree
#-----------------------------------------------------------------------------#
def makeExpansionFunc(wZ, bZ, bIncOrig):
if bIncOrig:
f = lambda x: np.concatenate((x, random_feature_expansion(x, wZ, bZ)), axis=1)
else:
f = lambda x: random_feature_expansion(x, wZ, bZ)
return f
#-----------------------------------------------------------------------------#
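# calc_mse returns, for every prefix length n of the sorted targets, the biased
# variance of the first n values: cumsq[n]/n - (cumsum[n]/n)**2, where the
# squared cumulative sum is expanded as (prev + y_n)**2 = prev**2 + y_n**2 + 2*prev*y_n.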
def calc_mse(cumtotal, cumsq, YTrainSort):
value = np.divide(cumsq, sVT(np.arange(1, YTrainSort.shape[0]+1))) -\
np.divide(((cumtotal[0:-1, :])**2 + YTrainSort**2 + np.multiply(2 * cumtotal[0:-1, :], YTrainSort)),\
sVT(np.arange(1, YTrainSort.shape[0]+1)**2))
return value
#-------------------------------------------------------------------------------
def growCCT(XTrain, YTrain, bReg, options, iFeatureNum, depth):
"""
This function applies greedy splitting according to the CCT algorithm and the
provided options structure. Algorithm either returns a leaf or forms an
internal splitting node in which case the function recursively calls itself
for each of the children, eventually returning the corresponding subtree.
Parameters
----------
XTrain = Array giving training features. Data should be
processed using processInputData before being passed to
CCT
YTrain = Output data after formatting carried out by genCCF
bReg = Whether to perform regression instead of classification.
Default = false (i.e. classification).
options = Options class of type optionsClassCCF. Some fields are
updated during recursion
iFeatureNum = Grouping of features as per processInputData. During
recursion if a feature is found to be identical across
data points, the corresponding values in iFeatureNum are
replaced with NaNs.
depth = Current tree depth (zero based)
Returns
-------
tree = Structure containing learnt tree
"""
# Standard variables
eps = 2.2204e-16
# Set any missing required variables
if (options["mseTotal"]).size == 0:
options["mseTotal"] = YTrain.var(axis=0)
#---------------------------------------------------------------------------
# First do checks for whether we should immediately terminate
#---------------------------------------------------------------------------
N = XTrain.shape[0]
# Return if one training point, pure node or if options for returning
# fulfilled. A little case to deal with a binary YTrain is required.
bStop = (N < (np.amax([2, options["minPointsForSplit"], 2 * options["minPointsLeaf"]]))) or\
(is_numeric(options["maxDepthSplit"]) and depth > options["maxDepthSplit"])
if depth > 490 and (options["maxDepthSplit"] == 'stack'):
bStop = True
logging.warning('Reached maximum depth imposed by stack limitations!')
if bStop:
tree = setupLeaf(YTrain, bReg, options)
return tree
else:
# Check if variance in Y is less than the cut off amount
varY = YTrain.var(axis=0)
if np.all(varY < (options["mseTotal"] * options["mseErrorTolerance"])):
tree = setupLeaf(YTrain, bReg, options)
return tree
#---------------------------------------------------------------------------
# Subsample features as required for hyperplane sampling
#---------------------------------------------------------------------------
iCanBeSelected = fastUnique(X=iFeatureNum)
iCanBeSelected = iCanBeSelected[~np.isnan(iCanBeSelected)]
lambda_ = np.min((iCanBeSelected.size, options["lambda"]))
indFeatIn = np.random.choice(int(iCanBeSelected.size), int(lambda_), replace=False)
iFeatIn = iCanBeSelected[indFeatIn]
bInMat = np.equal((iFeatureNum.flatten(order='F')[np.newaxis]), (np.sort(iFeatIn.flatten(order='F'))[np.newaxis]).T) # 1xk == nx1
iIn = (np.any(bInMat, axis=0)).ravel().nonzero()[0]
# Check for variation along selected dimensions and
# resample features that have no variation
bXVaries = queryIfColumnsVary(X=XTrain[:, iIn], tol=options["XVariationTol"])
if (not np.all(bXVaries)):
iInNew = iIn
nSelected = 0
iIn = iIn[bXVaries]
while (not np.all(bXVaries)) and lambda_ > 0:
iFeatureNum[iInNew[~bXVaries]] = np.nan
bInMat[:, iInNew[~bXVaries]] = False
bRemainsSelected = np.any(bInMat, axis=1)
nSelected = nSelected + bRemainsSelected.sum(axis=0)
iCanBeSelected = np.delete(iCanBeSelected, indFeatIn)
lambda_ = np.min((iCanBeSelected.size, options["lambda"]-nSelected))
if lambda_ < 1:
break
indFeatIn = np.random.choice(iCanBeSelected.size, size=int(lambda_), replace=False)
iFeatIn = iCanBeSelected[indFeatIn]
bInMat = np.equal((iFeatureNum.flatten(order='F')[np.newaxis]), (iFeatIn.flatten(order='F')[np.newaxis].T))
iInNew = (np.any(bInMat, axis=0)).ravel().nonzero()[0]
bXVaries = queryIfColumnsVary(X=XTrain[:, iInNew], tol=options["XVariationTol"])
iIn = np.sort(np.concatenate((iIn, iInNew[bXVaries])))
if iIn.size == 0:
# This means that there was no variation along any feature, therefore exit.
tree = setupLeaf(YTrain, bReg, options)
return tree
#---------------------------------------------------------------------------
# Projection bootstrap if required
#---------------------------------------------------------------------------
if options["bProjBoot"]:
iTrainThis = np.random.randint(N, size=(N, 1))
XTrainBag = XTrain[iTrainThis, iIn]
YTrainBag = YTrain[iTrainThis, :]
if len(YTrainBag.shape) > 2:
YTrainBag = np.squeeze(YTrainBag)
else:
XTrainBag = XTrain[:, iIn]
YTrainBag = YTrain
bXBagVaries = queryIfColumnsVary(X=XTrainBag, tol=options["XVariationTol"])
if (not np.any(bXBagVaries)) or\
(not bReg and YTrainBag.shape[1] > 1 and (np.sum(np.absolute(np.sum(YTrainBag, axis=0)) > 1e-12) < 2)) or\
(not bReg and YTrainBag.shape[1] == 1 and (np.any(np.sum(YTrainBag, axis=0) == np.array([0, YTrainBag.shape[0]])))) or\
(bReg and np.all(np.var(YTrainBag, axis=0) < (options["mseTotal"] * options["mseErrorTolerance"]))):
if (not options["bContinueProjBootDegenerate"]):
tree = setupLeaf(YTrain, bReg, options)
return tree
else:
XTrainBag = XTrain[:, iIn]
YTrainBag = YTrain
#---------------------------------------------------------------------------
# Check for only having two points
#---------------------------------------------------------------------------
if (not (len(options["projections"]) == 0)) and ((XTrainBag.shape[0] == 2) or queryIfOnlyTwoUniqueRows(X=XTrainBag)):
bSplit, projMat, partitionPoint = twoPointMaxMarginSplit(XTrainBag, YTrainBag, options["XVariationTol"])
if (not bSplit):
tree = setupLeaf(YTrain, bReg, options)
return tree
else:
bLessThanTrain = np.dot(XTrain[:, iIn], projMat) <= partitionPoint
iDir = 0
else:
# Generate the new features as required
if options["bRCCA"]:
wZ, bZ = genFeatureExpansionParameters(XTrainBag, options["rccaNFeatures"], options["rccaLengthScale"])
fExp = makeExpansionFunc(wZ, bZ, options["rccaIncludeOriginal"])
XTrainBag = fExp(XTrainBag)
projMat, _, _ = regCCA_alt(XTrainBag, YTrainBag, options["rccaRegLambda"], options["rccaRegLambda"], 1e-8)
if projMat.size == 0:
projMat = np.ones((XTrainBag.shape[1], 1))
UTrain = np.dot(fExp(XTrain[:, iIn]), projMat)
else:
projMat, yprojMat, _, _, _ = componentAnalysis(XTrainBag, YTrainBag, options["projections"], options["epsilonCCA"])
UTrain = np.dot(XTrain[:, iIn], projMat)
#-----------------------------------------------------------------------
# Choose the features to use
#-----------------------------------------------------------------------
# This step catches splits based on no significant variation
bUTrainVaries = queryIfColumnsVary(UTrain, options["XVariationTol"])
if (not np.any(bUTrainVaries)):
tree = setupLeaf(YTrain, bReg, options)
return tree
UTrain = UTrain[:, bUTrainVaries]
projMat = projMat[:, bUTrainVaries]
if options["bUseOutputComponentsMSE"] and bReg and (YTrain.shape[1] > 1) and\
(not (yprojMat.size == 0)) and (options["splitCriterion"] == 'mse'):
VTrain = np.dot(YTrain, yprojMat)
#-----------------------------------------------------------------------
# Search over splits using provided method
#-----------------------------------------------------------------------
nProjDirs = UTrain.shape[1]
splitGains = np.empty((nProjDirs,1))
splitGains.fill(np.nan)
iSplits = np.empty((nProjDirs,1))
iSplits.fill(np.nan)
for nVarAtt in range(nProjDirs):
# Calculate the probabilities of being at each class in each of child
# nodes based on proportion of training data for each of possible
# splits using current projection
sort_UTrain = UTrain[:, nVarAtt].ravel()
UTrainSort = np.sort(sort_UTrain)
iUTrainSort = np.argsort(sort_UTrain)
bUniquePoints_ = np.diff(UTrainSort, n=1, axis=0)
bUniquePoints = np.concatenate((bUniquePoints_ > options["XVariationTol"], np.array([False])))
if options["bUseOutputComponentsMSE"] and bReg and YTrain.shape[1] > 1 and (not (yprojMat.size == 0)) and (options["splitCriterion"] == 'mse'):
VTrainSort = VTrain[iUTrainSort, :]
else:
VTrainSort = YTrain[iUTrainSort, :]
leftCum = np.cumsum(VTrainSort, axis=0)
if (YTrain.shape[1] ==1 or options["bSepPred"]) and (not bReg):
# Convert to [class_doesnt_exist,class_exists]
leftCum = np.concatenate((np.subtract(sVT(X=np.arange(0,N)), leftCum), leftCum))
rightCum = np.subtract(leftCum[-1, :], leftCum)
# Calculate the metric values of the current node and two child nodes
if options["splitCriterion"] == 'mse':
cumSqLeft = np.cumsum(VTrainSort**2)
cumSqLeft = np.expand_dims(cumSqLeft, axis=1)
varData = np.subtract((cumSqLeft[-1]/N), (leftCum[-1, :]/N)**2)
if np.all(varData < (options["mseTotal"] * options["mseErrorTolerance"])):
# Total variation is less then the allowed tolerance so
# terminate and construct a leaf
tree = setupLeaf(YTrain, bReg, options)
return tree
cumtotal_l = np.concatenate((np.zeros((1, VTrainSort.shape[1])), leftCum))
metricLeft = calc_mse(cumtotal=cumtotal_l, cumsq=cumSqLeft, YTrainSort=VTrainSort)
# For calculating the right need to go in additive order again
# so go from other end and then flip
end = cumSqLeft.shape[0] - 1
vend = VTrainSort.shape[0] - 1
metricRight = np.concatenate((np.zeros((1, VTrainSort.shape[1])),\
calc_mse(rightCum[::-1, :],\
np.subtract((cumSqLeft[-1, :][np.newaxis]), cumSqLeft[(end-1)::-1, :]),\
VTrainSort[vend:0:-1, :])))
metricRight = metricRight[::-1, :]
# No need to do the grouping for regression as each must be
# a separate output anyway.
else:
assert (False), 'Invalid split criterion!'
metricCurrent = np.copy(metricLeft[-1, :])
metricLeft[~bUniquePoints, :] = np.inf
metricRight[~bUniquePoints, :] = np.inf
# Calculate gain in metric for each of possible splits based on current
# metric value minus metric value of child weighted by number of terms
# in each child
metricGain = np.subtract(metricCurrent,\
(np.multiply(sVT(np.arange(1,N+1, 1)), metricLeft)\
+np.multiply(sVT(np.arange(N-1, -1, -1)), metricRight))/N)
metricGain = np.round(metricGain, decimals=4)
# Combine gains if there are multiple outputs. Note that for gini,
# info and mse, the joint gain is equal to the mean gain, hence
# taking the mean here rather than explicitly calculating joints before.
if metricGain.shape[1] > 1:
if is_numeric(options["taskWeights"]):
# If weights provided, weight task appropriately in terms of importance.
metricGain = np.multiply(metricGain, (options["taskWeights"].flatten(order='F')[np.newaxis])) # (nxk) .* (1*k)
multiTGC = options["multiTaskGainCombination"]
if multiTGC == 'mean':
metricGain = np.mean(metricGain, axis=1, keepdims=True)
elif multiTGC == 'max':
metricGain = np.max(metricGain, axis=1, keepdims=True)
else:
assert (False), 'Invalid option for options.multiTaskGainCombination!'
# Disallow splits that violate the minimum number of leaf points
end = (metricGain.shape[0]-1)
metricGain[0:(options["minPointsLeaf"]-1)] = -np.inf
metricGain[(end-(options["minPointsLeaf"]-1)):] = -np.inf # Note that end is never chosen anyway
# Randomly sample from equally best splits
iSplits[nVarAtt] = np.argmax(metricGain[0:-1])
splitGains[nVarAtt] = np.max(metricGain[0:-1])
iEqualMax = ((np.absolute(metricGain[0:-1] - splitGains[nVarAtt]) < (10*eps)).ravel().nonzero())[0]
if iEqualMax.size == 0:
iEqualMax = np.array([1])
iSplits[nVarAtt] = iEqualMax[np.random.randint(iEqualMax.size)]
# If no split gives a positive gain then stop
if np.max(splitGains) < 0:
tree = setupLeaf(YTrain, bReg, options)
return tree
# Establish between projection direction
maxGain = np.max(splitGains, axis=0)
iEqualMax = ((np.absolute(splitGains - maxGain) < (10 * eps)).ravel().nonzero())[0]
# Use given method to break ties
if options["dirIfEqual"] == 'rand':
iDir = iEqualMax[np.random.randint(iEqualMax.size)]
elif options["dirIfEqual"] == 'first':
if iEqualMax.size == 0:
iDir = 0
else:
iDir = iEqualMax[0]
else:
assert (False), 'invalid dirIfEqual!'
iSplit = (iSplits[iDir]).astype(int)
#-----------------------------------------------------------------------
# Establish partition point and assign to child
#-----------------------------------------------------------------------
UTrain = UTrain[:, iDir, np.newaxis]
UTrainSort = np.sort(UTrain, axis=0)
# The convoluted nature of the below is to avoid numerical errors
uTrainSortLeftPart = UTrainSort[iSplit]
UTrainSort = np.subtract(UTrainSort, uTrainSortLeftPart)
# NOTE: operands after the first are inferred from the surrounding logic
# (midpoint of the two adjacent sorted projections, then the subtracted
# offset is added back).
partitionPoint = np.add(np.multiply(UTrainSort[iSplit], 0.5), np.multiply(UTrainSort[iSplit + 1], 0.5))
partitionPoint = np.add(partitionPoint, uTrainSortLeftPart)
import warnings
import numpy as np
import numpy.testing as npt
from dipy.data import get_fnames
from dipy.core.gradients import (gradient_table, GradientTable,
gradient_table_from_bvals_bvecs,
gradient_table_from_qvals_bvecs,
gradient_table_from_gradient_strength_bvecs,
WATER_GYROMAGNETIC_RATIO,
reorient_bvecs, generate_bvecs,
check_multi_b)
from dipy.io.gradients import read_bvals_bvecs
def test_btable_prepare():
sq2 = np.sqrt(2) / 2.
bvals = 1500 * np.ones(7)
bvals[0] = 0
bvecs = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[sq2, sq2, 0],
[sq2, 0, sq2],
[0, sq2, sq2]])
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
# bt.info
fimg, fbvals, fbvecs = get_fnames('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
bvecs = np.where(np.isnan(bvecs), 0, bvecs)
bt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bt.bvecs, bvecs)
bt2 = gradient_table(bvals, bvecs.T)
npt.assert_array_equal(bt2.bvecs, bvecs)
btab = np.concatenate((bvals[:, None], bvecs), axis=1)
bt3 = gradient_table(btab)
npt.assert_array_equal(bt3.bvecs, bvecs)
npt.assert_array_equal(bt3.bvals, bvals)
bt4 = gradient_table(btab.T)
npt.assert_array_equal(bt4.bvecs, bvecs)
npt.assert_array_equal(bt4.bvals, bvals)
# Test for proper inputs (expects either bvals/bvecs or 4 by n):
npt.assert_raises(ValueError, gradient_table, bvecs)
def test_GradientTable():
gradients = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[3, 4, 0],
[5, 0, 12]], 'float')
expected_bvals = np.array([0, 1, 1, 5, 13])
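# b-values are the Euclidean norms of the gradient rows,
# e.g. ||[3, 4, 0]|| = 5 and ||[5, 0, 12]|| = 13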
expected_b0s_mask = expected_bvals == 0
expected_bvecs = gradients / (expected_bvals + expected_b0s_mask)[:, None]
gt = GradientTable(gradients, b0_threshold=0)
npt.assert_array_almost_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.b0s_mask, expected_b0s_mask)
npt.assert_array_almost_equal(gt.bvecs, expected_bvecs)
npt.assert_array_almost_equal(gt.gradients, gradients)
gt = GradientTable(gradients, b0_threshold=1)
npt.assert_array_equal(gt.b0s_mask, [1, 1, 1, 0, 0])
npt.assert_array_equal(gt.bvals, expected_bvals)
npt.assert_array_equal(gt.bvecs, expected_bvecs)
# checks negative values in gtab
npt.assert_raises(ValueError, GradientTable, -1)
npt.assert_raises(ValueError, GradientTable, np.ones((6, 2)))