prompt (stringlengths 19–879k) | completion (stringlengths 3–53.8k) | api (stringlengths 8–59)
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 19:27:58 2019
@author: narayanashanmukhavenkat
"""
import numpy as np
from nltk.corpus import stopwords
from gensim import corpora, models, similarities, matutils
from gensim.models import lsimodel, nmf
from gensim.models.coherencemodel import CoherenceModel
documents = []
for counter in range(1033):
temp = open(str(counter+1)+".txt", 'r')
documents.append(temp.read())
temp.close()
stop_words = stopwords.words('english')
texts = [[word for word in document.lower().split() if word not in stop_words] for document in documents]
dictionary = corpora.Dictionary(texts)
i=0
corpus = [dictionary.doc2bow(text) for text in texts]
#96351 total words
#20097 unique words
print(np.shape(np.array(corpus)))
#Convert document into the bag-of-words (BoW) format = list of (token_id, token_count) tuples.
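# For example (token ids depend on the fitted dictionary), dictionary.doc2bow("hemophilia studies studies".split())
# would return something like [(<id of "hemophilia">, 1), (<id of "studies">, 2)]; words absent from the dictionary are ignored.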
corpora.MmCorpus.serialize('/tmp/ir.mm', corpus)
#lsi = models.LsiModel(corpus, num_topics=43, id2word = dictionary)
#print(np.shape(np.array(lsi.projection.u))) # - left singular vectors
#print(np.shape(np.array(lsi.projection.s)))
#print(np.shape(np.array(matutils.corpus2dense(lsi[corpus], len(lsi.projection.s)).T / lsi.projection.s)))
#print(np.shape(np.array(lsi[corpus])))
#np.matmul(np.matmul(np.array(lsi.projection.u), np.array(lsi.projection.s)),np.array(matutils.corpus2dense(lsi[corpus], len(lsi.projection.s)).T / lsi.projection.s))
#index = similarities.MatrixSimilarity(lsi[corpus])
doc = "medicosocial studies of hemophilia"
vec_bow = dictionary.doc2bow(doc.lower().split())
f = open("output.txt", "a")
#for i in range(0, lsi.num_topics):
# print(lsi.print_topic(i,10))
#vec_lsi1 = lsi[vec_bow]
#sims = index[vec_lsi1]
#sims = sorted(enumerate(sims), key=lambda item: -item[1])
#cm1 = CoherenceModel(model=lsi, corpus=corpus, coherence='u_mass')
#coherence = cm1.get_coherence()
#print('#####################################################')
#print(coherence)
#print(sims)
nmfmodel = nmf.Nmf(corpus, num_topics=43, id2word = dictionary, normalize =True)
for i in range(0, 43):
print(nmfmodel.print_topic(i,10))
print('#########################')
print("DOCUMENT TOPICS OF MEDICOSOCIAL STUDIES OF HEMOPHILIA")
print(nmfmodel.get_document_topics(vec_bow))
#print(nmfmodel._W)
#print(nmfmodel._h)
print(np.array(nmfmodel._W))  # api: numpy.array
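# A minimal sketch (not part of the original script) of how the similarity query that is commented out for the
# LSI model above could be reproduced with the trained NMF model; MatrixSimilarity and nmfmodel[...] are standard
# gensim calls, while the variable names below are illustrative assumptions.
nmf_index = similarities.MatrixSimilarity(nmfmodel[corpus], num_features=43)
nmf_sims = sorted(enumerate(nmf_index[nmfmodel[vec_bow]]), key=lambda item: -item[1])
print(nmf_sims[:10], file=f)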
import numpy as np
from src.makeMolecule import conformer
from src.gaussian import gmm
import pickle
def get_gmm(save_folder, smiles_dict, t):
try:
if t == "representing":
with open(str(save_folder + "/gmm_dictionary.pickle"), "rb") as f:
gmm_dictionary = pickle.load(f)
print("Loaded the GMM data!")
smiles, conformers = make_conformers(smiles_dict)
return gmm_dictionary, smiles, conformers
else:
with open(str(save_folder + "/gmm_dictionary.pickle"), "rb") as f:
gmm_dictionary = pickle.load(f)
print("Loaded the GMM data!")
return gmm_dictionary
except FileNotFoundError:
if t == "training":
print("There is no file named gmm_dictionary.pickle included in {}.\nNew gaussian mixture models "
"will be created for all geometry features.\n".format(save_folder))
smiles, conformers = make_conformers(smiles_dict)
gmm_dictionary = gmm(smiles, conformers, save_folder)
return gmm_dictionary
else:
print("There is no gmm_dictionary.pickle included in {}".format(save_folder))
raise
def clean_dicts(smiles_dict, weight_dict):
lumps = list(smiles_dict.keys())
index_vector = [[i for i, e in enumerate(weight_dict[lump]) if e == 0] for lump in lumps]
for lump, dels in zip(lumps, index_vector):
weight_dict[lump] = np.delete(weight_dict[lump], dels)
smiles_dict[lump] = list(np.delete(smiles_dict[lump], dels))
print("Cleaned up the SMILES and weight dictionaries!")
return smiles_dict, weight_dict
def molecular_feature_collection(dataframe, smiles_dictionary):
# Headers should be: "CH", "Tboil", "Tboil code", "Tcrit", "Tcrit code", "d20", "d20 code", "pVap", "vap code"
ch_dict = {}
bp_dict = {}
tc_dict = {}
sg_dict = {}
vap_dict = {}
smiles = []
for lump in smiles_dictionary:
for smile in smiles_dictionary[lump]:
smiles.append(smile)
bp_to_predict = []
tc_to_predict = []
sg_to_predict = []
vap_to_predict = []
for smile in smiles:
ch_dict[smile] = dataframe["CH"][smile]
if dataframe["Tboil code"][smile] == 1:
bp_dict[smile] = dataframe["Tboil"][smile]
else:
bp_to_predict.append(smile)
if dataframe["Tcrit code"][smile] == 1 or dataframe["Tcrit code"][smile] == 2:
tc_dict[smile] = dataframe["Tcrit"][smile]
else:
tc_to_predict.append(smile)
if dataframe["d20 code"][smile] == "1,2":
sg_dict[smile] = dataframe["d20"][smile]
else:
sg_to_predict.append(smile)
if dataframe["vap code"][smile] == "1,2":
vap_dict[smile] = dataframe["pVap"][smile]
else:
vap_to_predict.append(smile)
return ch_dict, bp_dict, tc_dict, sg_dict, vap_dict, bp_to_predict, tc_to_predict, sg_to_predict, vap_to_predict
def mixture_features(ch_dict, bp_dict, tc_dict, sg_dict, vap_dict, smiles_dict):
features_dict = {}
for lump in smiles_dict:
for molecule in smiles_dict[lump]:
features_dict[molecule] = np.array([ch_dict[molecule], bp_dict[molecule],
tc_dict[molecule], sg_dict[molecule],
vap_dict[molecule]])
return features_dict
def absolute_fractions(compositions, weight_dict, smiles_dict, lumps):
all_fractions = []
for comp in compositions:
absolute_fraction = np.array([])
for i in range(len(comp)):
absolute_fraction = np.append(absolute_fraction, comp[i] * weight_dict[lumps[i]])
absolute_fraction /= np.sum(absolute_fraction)  # api: numpy.sum
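# The rest of this function is cut off in the source; a plausible completion, assuming the normalized
# per-molecule fractions are collected for every composition, would be:
# all_fractions.append(absolute_fraction)
# return np.asarray(all_fractions)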
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 09:44:30 2021
@author: erri
"""
import os
import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib.colors import ListedColormap, BoundaryNorm
######################################################################################
# SETUP FOLDERS
######################################################################################
# setup working directory and DEM's name
# home_dir = '/home/erri/Documents/morphological_approach/3_output_data/q1.0_2_test/2_prc_laser/'
home_dir = '/home/erri/Documents/PhD/Research/2_raw_data/repeat_surveys_test/' # repeat_surveys_test
input_dir = os.path.join(home_dir, 'surveys')
# array mask for filtering data outside the channel domain
array_mask_name, array_mask_path = 'array_mask.txt', '/home/erri/Documents/morphological_approach/2_raw_data'
# TODO Check mask
# TODO Update the mask based on the new laser outputs [12 mm threshold]
files=[] # initializing filenames list
# Creating array with file names:
for f in sorted(os.listdir(input_dir)):
path = os.path.join(input_dir, f)
if os.path.isfile(path) and f.endswith('.txt') and f.startswith('matrix_bed_norm'):
files = np.append(files, f)
# Perform difference over all combination of DEMs in the working directory
comb = [] # combination of differences
for h in range (0, len(files)-1):
for k in range (0, len(files)-1-h):
DEM1_name=files[h]
DEM2_name=files[h+1+k]
print(DEM2_name, '-', DEM1_name)
comb = np.append(comb, DEM2_name + '-' + DEM1_name)
# Write DEM1 and DEM2 names below to bypass batch processing of all the differences
DEM1_name = 'matrix_bed_norm_q07S5same.txt'
DEM2_name = 'matrix_bed_norm_q07S6same.txt'
# Specify DEMs path...
path_DEM1 = os.path.join(input_dir, DEM1_name)
path_DEM2 = os.path.join(input_dir, DEM2_name)
# ...and DOD name.
DoD_name = 'DoD_' + DEM2_name[19:21] + '-' + DEM1_name[19:21] + '_'
# Output folder
output_name = 'script_outputs_' + DEM2_name[19:21] + '-' + DEM1_name[19:21] # Set outputs name
output_dir = os.path.join(home_dir, 'DoDs_0.8') # Set outputs directory
path_out = os.path.join(output_dir, output_name) # Set outputs path
if os.path.exists(path_out): # Check if outputs path already exists
pass
else:
os.mkdir(output_dir)
os.mkdir(path_out)
##############################################################################
# SETUP SCRIPT PARAMETERS
##############################################################################
# Thresholds values
thrs_1 = 2.0 # [mm] # Lower threshold
thrs_2 = 15.0 # [mm] # Upper threshold
neigh_thrs = 5 # [-] # Number of neighborhood cells for validation
# Pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
# Not a number raster value (NaN)
NaN = -999
##############################################################################
# DATA READING...
##############################################################################
# Header initialization and extraction
lines = []
header = []
with open(path_DEM1, 'r') as file:
for line in file:
lines.append(line) # lines is a list. Each item is a row of the input file
# Header extraction...
for i in range(0, 7):
header.append(lines[i])
# Header printing in a file txt called header.txt
with open(path_out + '/' + DoD_name + 'header.txt', 'w') as head:
head.writelines(header)
##############################################################################
# DATA LOADING...
##############################################################################
DEM1 = np.loadtxt(path_DEM1,
# delimiter=',',
skiprows=8
)
DEM2 = np.loadtxt(path_DEM2,
# delimiter=',',
skiprows=8)
# Shape control:
arr_shape=min(DEM1.shape, DEM2.shape)
if not(DEM1.shape == DEM2.shape):
print('Attention: DEMs do not have the same shape.')
# reshaping:
rows = min(DEM1.shape[0], DEM2.shape[0])
cols = min(DEM1.shape[1], DEM2.shape[1])
arr_shape = [rows, cols]
# and reshaping...
DEM1=DEM1[0:arr_shape[0], 0:arr_shape[1]]
DEM2=DEM2[0:arr_shape[0], 0:arr_shape[1]]
# Loading mask
array_mask = np.loadtxt(os.path.join(array_mask_path, array_mask_name))
# Reshaping mask
if not(array_mask.shape == arr_shape):
array_mask=array_mask[0:arr_shape[0], 0:arr_shape[1]]
array_msk = np.where(np.isnan(array_mask), 0, 1)
array_msk_nan = np.where(np.logical_not(np.isnan(array_mask)), 1, np.nan)
##############################################################################
# PERFORM DEM OF DIFFERENCE - DEM2-DEM1
##############################################################################
# Raster dimension
dim_x, dim_y = DEM1.shape
# Creating DoD array with np.nan
DoD_raw = np.zeros(DEM1.shape)
DoD_raw = np.where(np.logical_or(DEM1 == NaN, DEM2 == NaN), np.nan, DEM2 - DEM1)
# Masking with array mask
DoD_raw = DoD_raw*array_msk_nan
# Creating GIS readable DoD array (np.nan as -999)
DoD_raw_rst = np.zeros(DoD_raw.shape)
DoD_raw_rst = np.where(np.isnan(DoD_raw), NaN, DoD_raw)
# Count the number of pixels in the channel area
DoD_count = np.count_nonzero(np.where(np.isnan(DoD_raw), 0, 1))
print('Active pixels:', DoD_count)
# DoD statistics
# print('The minimum DoD value is:\n', np.nanmin(DoD_raw))
# print('The maximum DoD value is:\n', np.nanmax(DoD_raw))
# print('The DoD shape is:\n', DoD_raw.shape)
##############################################################################
# DATA FILTERING...
##############################################################################
# Perform domain-wide average
domain_avg = np.pad(DoD_raw, 1, mode='edge') # 1-cell pad of the domain with edge values
DoD_mean = np.zeros(DEM1.shape)
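# Each cell is replaced by a weighted average of its 3x3 neighbourhood; the weight matrix below gives non-zero
# weight only to the cell itself and its two neighbours along the first array axis, so the smoothing acts in a
# single direction (the "Y axis" of the closing docstring).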
for i in range (0, dim_x):
for j in range (0, dim_y):
if np.isnan(DoD_raw[i, j]):
DoD_mean[i, j] = np.nan
else:
k = np.array([[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]],
[domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2]],
[domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, j + 2]]])
w = np.array([[0, 1, 0],
[0, 2, 0],
[0, 1, 0]])
w_norm = w / (sum(sum(w))) # Normalizing weight matrix
DoD_mean[i, j] = np.nansum(k * w_norm)
# # Filtered array weighted average by nan.array mask
# DoD_mean = DoD_mean * array_msk_nan
# Create a GIS readable DoD mean (np.nan as -999)
DoD_mean_rst = np.where(np.isnan(DoD_mean), NaN, DoD_mean)
# Threshold and Neighbourhood analysis process
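# Rule applied below: |DoD| < thrs_1 is set to zero; thrs_1 <= |DoD| <= thrs_2 is kept only if at least
# neigh_thrs cells of its 3x3 neighbourhood (the cell included) share its sign; |DoD| > thrs_2 is kept as is.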
DoD_filt = np.copy(DoD_mean) # Initialize filtered DoD array as a copy of the averaged one
DoD_filt_domain = np.pad(DoD_filt, 1, mode='edge') # Create neighbourhood analysis domain
for i in range(0,dim_x):
for j in range(0,dim_y):
if abs(DoD_filt[i,j]) < thrs_1: # Set as "no variation detected" all variations lower than thrs_1
DoD_filt[i,j] = 0
if abs(DoD_filt[i,j]) >= thrs_1 and abs(DoD_filt[i,j]) <= thrs_2: # Perform neighbourhood analysis for variations between thrs_1 and thrs_2
# Create kernel
ker = np.array([[DoD_filt_domain[i, j], DoD_filt_domain[i, j + 1], DoD_filt_domain[i, j + 2]],
[DoD_filt_domain[i + 1, j], DoD_filt_domain[i + 1, j + 1], DoD_filt_domain[i + 1, j + 2]],
[DoD_filt_domain[i + 2, j], DoD_filt_domain[i + 2, j + 1], DoD_filt_domain[i + 2, j + 2]]])
if not((DoD_filt[i,j] > 0 and np.count_nonzero(ker > 0) >= neigh_thrs) or (DoD_filt[i,j] < 0 and np.count_nonzero(ker < 0) >= neigh_thrs)):
# So if the nature of the selected cell is not confirmed...
DoD_filt[i,j] = 0
# DoD_out = DoD_filt # * array_msk_nan
# Create a GIS readable filtered DoD (np.nan as -999)
DoD_filt_rst = np.where(np.isnan(DoD_filt), NaN, DoD_filt)
# Remove pixels that are entirely surrounded by zeros
DoD_filt_nozero=np.copy(DoD_filt) # Initialize filtered DoD array as a copy of the filtered one
zerosur_domain = np.pad(DoD_filt_nozero, 1, mode='edge') # Create analysis domain
for i in range(0,dim_x):
for j in range(0,dim_y):
if DoD_filt_nozero[i,j] != 0 and not(np.isnan(DoD_filt_nozero[i,j])): # Limiting the analysis to non-zero numbers
# Create kernel
ker = np.array([[zerosur_domain[i, j], zerosur_domain[i, j + 1], zerosur_domain[i, j + 2]],
[zerosur_domain[i + 1, j], zerosur_domain[i + 1, j + 1], zerosur_domain[i + 1, j + 2]],
[zerosur_domain[i + 2, j], zerosur_domain[i + 2, j + 1], zerosur_domain[i + 2, j + 2]]])
zero_count = np.count_nonzero(ker == 0) + np.count_nonzero(np.isnan(ker))
if zero_count == 8:
DoD_filt_nozero[i,j] = 0
else:
pass
# Masking of DoD_filt_nozero (e.g. with the channel mask) is not applied here
# Create GIS-readable DoD filtered and zero-surrounded avoided
DoD_filt_nozero_rst = np.where(np.isnan(DoD_filt_nozero), NaN, DoD_filt_nozero)
'''
Output files:
DoD_raw: the raw DEM of difference, i.e. DEM2-DEM1
DoD_raw_rst: same as DoD_raw, but with np.nan saved as -999
DoD_mean: DoD_raw smoothed along the Y axis (see the weights used in the averaging process)
DoD_mean_rst: same as DoD_mean, but with np.nan saved as -999
DoD_filt: DoD_mean with the neighbourhood analysis applied
DoD_filt_rst: same as DoD_filt, but with np.nan saved as -999
DoD_filt_nozero: DoD_filt with the zero-surrounded pixel removal applied
DoD_filt_nozero_rst: same as DoD_filt_nozero, but with np.nan saved as -999
'''
##############################################################################
# PLOT RAW DOD, MEAN DOD AND FILTERED DOD
##############################################################################
# # Plot data using nicer colors
# colors = ['linen', 'lightgreen', 'darkgreen', 'maroon']
# class_bins = [-10.5, -1.5, 0, 1.5, 10.5]
# cmap = ListedColormap(colors)
# norm = BoundaryNorm(class_bins,
# len(colors))
# fig, (ax1, ax2, ax3) = plt.subplots(3,1)
# raw= ax1.imshow(DoD_raw, cmap=cmap, norm=norm)
# ax1.set_title('raw DoD')
# mean = ax2.imshow(DoD_mean_th1, cmap=cmap, norm=norm)
# ax2.set_title('mean DoD')
# filt = ax3.imshow(DoD_out, cmap=cmap, norm=norm)
# ax3.set_title('Filtered DoD')
# #fig.colorbar()
# fig.tight_layout()
# plt.show()
# plt.savefig(path_out + '/raster.pdf') # raster (png, jpg, rgb, tif), vector (pdf, eps), latex (pgf)
# #plt.imshow(DoD_out, cmap='RdYlGn')
##############################################################################
# VOLUMES
##############################################################################
# DoD filtered name: DoD_filt
# Create new raster where apply volume calculation
# DoD>0 --> Deposition, DoD<0 --> Scour
DoD_vol = np.where(np.isnan(DoD_filt_nozero), 0, DoD_filt_nozero)
DEP = (DoD_vol>0)*DoD_vol
SCO = (DoD_vol<0)*DoD_vol
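# DoD values are elevation changes in mm and each cell covers px_x*px_y = 250 mm^2, so summing the cell values
# and multiplying by the cell area gives volumes in mm^3 (positive cells are deposition, negative cells scour).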
# print(DEM2_name + '-' + DEM1_name)
print('Total volume [mm^3]:', np.sum(DoD_vol)*px_x*px_y)
print('Deposition volume [mm^3]:', np.sum(DEP)*px_x*px_y)
print('Scour volume [mm^3]:', np.sum(SCO)*px_x*px_y)
print()
# Active_pixel analysis
# Resize DoD for photo matching
active_pixel_count = DoD_vol[:,153:]
active_pixel_count = np.where(active_pixel_count!=0, 1, 0)
active_area = np.count_nonzero(active_pixel_count) *px_x*px_y
print('Area_active: ', active_area, '[mm**2]')
print()
print()
##############################################################################
# SAVE DATA
##############################################################################
# RAW DoD
# Print raw DoD in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'raw.txt', DoD_raw, fmt='%0.1f', delimiter='\t')
# Printing raw DoD in txt file (NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'raw_rst.txt', DoD_raw_rst, fmt='%0.1f', delimiter='\t')
# MEAN DoD
# Print DoD mean in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'mean.txt', DoD_mean , fmt='%0.1f', delimiter='\t')
# Print filtered DoD (with NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'mean_rst.txt', DoD_mean_rst , fmt='%0.1f', delimiter='\t')
# FILTERED DoD
# Print filtered DoD (with np.nan)...
np.savetxt(path_out + '/' + DoD_name + 'filt_.txt', DoD_filt, fmt='%0.1f', delimiter='\t')
# Print filtered DoD (with NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'filt_rst.txt', DoD_filt_rst, fmt='%0.1f', delimiter='\t')
# AVOIDED ZERO SURROUNDED DoD
# Print filtered DoD (with np.nan)...
np.savetxt(path_out + '/' + DoD_name + 'nozero_.txt', DoD_filt_nozero, fmt='%0.1f', delimiter='\t')  # api: numpy.savetxt
"""Module :mod:`perslay.expe` provide experimental functions to run perslay."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
import os.path
import itertools
import h5py
from ast import literal_eval
from scipy.sparse import csgraph
from scipy.io import loadmat, savemat
from scipy.linalg import eigh
import datetime
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import gudhi as gd
import matplotlib.pyplot as plt
import pandas as pd
from six.moves import xrange
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import KFold, ShuffleSplit, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from perslay.perslay import perslay_channel
from perslay.preprocessing import preprocess
from perslay.utils import diag_to_dict, hks_signature, get_base_simplex, apply_graph_extended_persistence
from tensorflow import random_uniform_initializer as rui
class baseModel:
def __init__(self, filt_parameters, perslay_parameters, labels, combination=False):
self.filt_parameters = filt_parameters
self.perslay_parameters = perslay_parameters
self.num_labels = labels.shape[1]
self.num_filts = len(self.filt_parameters["names"])
self.combination = combination
def get_parameters(self):
return [self.filt_parameters, self.perslay_parameters, self.combination]
def instance(self, indxs, feats, diags):
if self.filt_parameters["learn"]:
lpd = tf.load_op_library("persistence_diagram.so")
hks = tf.load_op_library("hks.so")
import _persistence_diagram_grad
import _hks_grad
H, T = np.array(self.filt_parameters["homology"]), np.array(self.filt_parameters["thresholds"])
N, I = np.array([[self.num_filts]]), np.array(self.filt_parameters["init"], dtype=np.float32)
cumsum = np.cumsum(np.array([0] + [thr for thr in T[:,0]]))
times = tf.get_variable("times", initializer=I)
conn = hks.heat_kernel_signature(indxs, times)
pdiag_array, _ = lpd.persistence_diagram(H, T, indxs, N, conn)
pds = tf.reshape(pdiag_array, [-1, cumsum[-1], 3])
pdiags = [pds[:,cumsum[i]:cumsum[i+1],:] for i in range(self.num_filts)]
else:
pdiags = diags
list_v = []
if self.combination:
n_pl = len(self.perslay_parameters)
alpha = tf.get_variable("perslay_coeffs", initializer=np.array(np.ones(n_pl), dtype=np.float32))
for i in range(self.num_filts):
# A perslay channel must be defined for each type of persistence diagram.
# Here it is a linear combination of several pre-defined layers.
list_dgm = []
for prm in range(n_pl):
perslay_channel(output = list_dgm, # list used to store all outputs
name = "perslay-" + str(i), # name of this layer
diag = pdiags[i], # i-th type of diagrams
**self.perslay_parameters[prm])
list_dgm = [tf.multiply(alpha[idx], tf.layers.batch_normalization(dgm))
for idx, dgm in enumerate(list_dgm)]
list_v.append(tf.math.add_n(list_dgm))
else:
if type(self.perslay_parameters) is not list:
for i in range(self.num_filts):
# A perslay channel must be defined for each type of persistence diagram.
# Here they all have the same hyper-parameters.
perslay_channel(output = list_v, # list used to store all outputs
name = "perslay-" + str(i), # name of this layer
diag = pdiags[i], # i-th type of diagrams
**self.perslay_parameters)
else:
for i in range(self.num_filts):
# A perslay channel must be defined for each type of persistence diagram.
# Here they all have the same hyper-parameters.
perslay_channel(output = list_v, # list used to store all outputs
name = "perslay-" + str(i), # name of this layer
diag = pdiags[i], # i-th type of diagrams
**self.perslay_parameters[i])
# Concatenate all channels and add other features
with tf.variable_scope("perslay"):
representations = tf.concat(list_v, 1)
with tf.variable_scope("norm_feat"):
feat = tf.layers.batch_normalization(feats)
final_representations = tf.concat([representations, feat], 1)
# Final layer to make predictions
with tf.variable_scope("final-dense"):
logits = tf.layers.dense(final_representations, self.num_labels)
return representations, logits
def load_config(filepath):
with open(filepath, "r") as fp:
lines = fp.readlines()
dataset_type = lines[0][:-1]
filt_parameters = literal_eval(lines[1])
perslay_parameters = literal_eval(lines[2])
combs = literal_eval(lines[3])
optim_parameters = literal_eval(lines[4])
optim_parameters["balanced"] = False
for k in perslay_parameters.keys():
if k[-4:] == "init":
a, b = perslay_parameters[k][0], perslay_parameters[k][1]
perslay_parameters[k] = rui(a, b)
return dataset_type, filt_parameters, perslay_parameters, combs, optim_parameters
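# The .conf file is thus expected to contain five lines: the dataset type ("graph" or "orbit"), followed by Python
# literals for the filtration parameters, the PersLay parameters, the combination flag(s) and the optimization
# parameters; any "...init" entry of the PersLay parameters is converted into a random_uniform_initializer(a, b).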
# filtrations and features generation for datasets in the paper
def generate_diag_and_features(dataset, path_dataset=""):
path_dataset = "./data/" + dataset + "/" if not len(path_dataset) else path_dataset
filepath = path_dataset + dataset + ".conf"
dataset_type, filt_parameters, perslay_parameters, combs, optim_parameters = load_config(filepath=filepath)
if "REDDIT" in dataset:
print("Unfortunately, REDDIT data are not available yet for memory issues.\n")
print("Moreover, the link we used to download the data,")
print("http://www.mit.edu/~pinary/kdd/datasets.tar.gz")
print("is down at the commit time (May 23rd).")
print("We will update this repository when we figure out a workaround.")
return
# if "REDDIT" in dataset:
# _prepreprocess_reddit(dataset)
if os.path.isfile(path_dataset + dataset + ".hdf5"):
os.remove(path_dataset + dataset + ".hdf5")
diag_file = h5py.File(path_dataset + dataset + ".hdf5")
list_filtrations = filt_parameters["names"]
[diag_file.create_group(str(filtration)) for filtration in filt_parameters["names"]]
list_hks_times = np.unique([filtration.split("_")[1] for filtration in list_filtrations])
if dataset_type == "graph":
# preprocessing
pad_size = 1
for graph_name in os.listdir(path_dataset + "mat/"):
A = np.array(loadmat(path_dataset + "mat/" + graph_name)["A"], dtype=np.float32)
pad_size = np.max((A.shape[0], pad_size))
features = pd.DataFrame(index=range(len(os.listdir(path_dataset + "mat/"))), columns=["label"] + ["eval" + str(i) for i in range(pad_size)] + [name + "-percent" + str(i) for name, i in itertools.product([f for f in list_hks_times if "hks" in f], 10 * np.arange(11))])
for idx, graph_name in enumerate((os.listdir(path_dataset + "mat/"))):
name = graph_name.split("_")
gid = int(name[name.index("gid") + 1]) - 1
A = np.array(loadmat(path_dataset + "mat/" + graph_name)["A"], dtype=np.float32)
num_vertices = A.shape[0]
label = int(name[name.index("lb") + 1])
L = csgraph.laplacian(A, normed=True)
egvals, egvectors = eigh(L)
basesimplex = get_base_simplex(A)
eigenvectors = np.zeros([num_vertices, pad_size])
eigenvals = np.zeros(pad_size)
eigenvals[:min(pad_size, num_vertices)] = np.flipud(egvals)[:min(pad_size, num_vertices)]
eigenvectors[:, :min(pad_size, num_vertices)] = np.fliplr(egvectors)[:, :min(pad_size, num_vertices)]
graph_features = []
graph_features.append(eigenvals)
for fhks in list_hks_times:
hks_time = float(fhks.split("-")[0])
# persistence
filtration_val = hks_signature(egvectors, egvals, time=hks_time)
dgmOrd0, dgmExt0, dgmRel1, dgmExt1 = apply_graph_extended_persistence(A, filtration_val, basesimplex)
diag_file["Ord0_" + str(hks_time) + "-hks"].create_dataset(name=str(gid), data=dgmOrd0)
diag_file["Ext0_" + str(hks_time) + "-hks"].create_dataset(name=str(gid), data=dgmExt0)
diag_file["Rel1_" + str(hks_time) + "-hks"].create_dataset(name=str(gid), data=dgmRel1)
diag_file["Ext1_" + str(hks_time) + "-hks"].create_dataset(name=str(gid), data=dgmExt1)
# features
graph_features.append(np.percentile(hks_signature(eigenvectors, eigenvals, time=hks_time), 10 * np.arange(11)))
features.loc[gid] = np.insert(np.concatenate(graph_features), 0, label)
features['label'] = features['label'].astype(int)
elif dataset_type == "orbit":
def _gen_orbit(num_pts_per_orbit, param):
X = np.zeros([num_pts_per_orbit, 2])
xcur, ycur = np.random.rand(), np.random.rand()
for idx in range(num_pts_per_orbit):
xcur = (xcur + param * ycur * (1. - ycur)) % 1
ycur = (ycur + param * xcur * (1. - xcur)) % 1
X[idx, :] = [xcur, ycur]
return X
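# _gen_orbit iterates the discrete dynamical system x_{n+1} = (x_n + r*y_n*(1 - y_n)) mod 1,
# y_{n+1} = (y_n + r*x_{n+1}*(1 - x_{n+1})) mod 1, used for the ORBIT-style point-cloud datasets.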
labs = []
count = 0
num_diag_per_param = 1000 if "5K" in dataset else 20000
for lab, r in enumerate([2.5, 3.5, 4.0, 4.1, 4.3]):
print("Generating", num_diag_per_param, "orbits and diagrams for r = ", r, "...")
for dg in range(num_diag_per_param):
X = _gen_orbit(num_pts_per_orbit=1000, param=r)
alpha_complex = gd.AlphaComplex(points=X)
simplex_tree = alpha_complex.create_simplex_tree(max_alpha_square=1e50)
simplex_tree.persistence()
diag_file["Alpha0"].create_dataset(name=str(count),
data=np.array(simplex_tree.persistence_intervals_in_dimension(0)))
diag_file["Alpha1"].create_dataset(name=str(count),
data=np.array(simplex_tree.persistence_intervals_in_dimension(1)))
orbit_label = {"label": lab, "pcid": count}
labs.append(orbit_label)
count += 1
labels = pd.DataFrame(labs)
labels.set_index("pcid")
features = labels[["label"]]
features.to_csv(path_dataset + dataset + ".csv")
return diag_file.close()
# notebook utils
def load_diagfeatlabels(dataset, path_dataset="", filtrations=[], verbose=False):
path_dataset = "./data/" + dataset + "/" if not len(path_dataset) else path_dataset
diagfile = h5py.File(path_dataset + dataset + ".hdf5", "r")
filts = list(diagfile.keys()) if len(filtrations) == 0 else filtrations
feat = pd.read_csv(path_dataset + dataset + ".csv", index_col=0, header=0)
diag = diag_to_dict(diagfile, filts=filts)
# Extract and encode labels with integers
L = np.array(LabelEncoder().fit_transform(np.array(feat["label"])))
L = OneHotEncoder(sparse=False, categories="auto").fit_transform(L[:, np.newaxis])
# Extract features
F = np.array(feat)[:, 1:] # 1: removes the labels
if verbose:
print("Dataset:", dataset)
print("Number of observations:", L.shape[0])
print("Number of classes:", L.shape[1])
return diag, F, L
# learning utils
def _create_batches(indices, feed_dict, num_tower, tower_size, random=False, balanced=True, labels=np.empty([0,0])):
batches = []
if balanced:
num_labs = labels.shape[1]
tower_size = tower_size - (tower_size % num_labs)
batch_size = num_tower * tower_size
I = []
for l in range(num_labs):
I.append(np.argwhere(labels[:,l]==1)[:,0])
pts_per_lab = min([len(idxs) for idxs in I])
data_num_pts = num_labs * pts_per_lab
batch_size_lab = int(batch_size / num_labs)
residual = pts_per_lab % batch_size_lab
nbsplit = int((pts_per_lab - residual) / batch_size_lab)
split = np.split(np.arange(pts_per_lab - residual), nbsplit) if nbsplit > 0 else []
if random:
for l in range(num_labs):
np.random.shuffle(I[l])
for i in range(nbsplit):
feed_sub = dict()
for k in feed_dict.keys():
FS = []
for l in range(num_labs):
FS.append(feed_dict[k][I[l][split[i]]])
FS = np.vstack(FS)
np.random.shuffle(FS)
feed_sub[k] = FS
batches.append(feed_sub)
if residual > 0:
st, sz = pts_per_lab - residual, residual - (residual % num_tower)
feed_sub = dict()
for k in feed_dict.keys():
FS = []
for l in range(num_labs):
FS.append(feed_dict[k][I[l][np.arange(st, st + sz)]])
FS = np.vstack(FS)
np.random.shuffle(FS)
feed_sub[k] = FS
batches.append(feed_sub)
else:
batch_size = num_tower * tower_size
data_num_pts = len(indices)
residual = data_num_pts % batch_size
nbsplit = int((data_num_pts - residual) / batch_size)
split = np.split(np.arange(data_num_pts - residual), nbsplit) if nbsplit > 0 else []
if random:
perm = np.random.permutation(data_num_pts)
for i in range(nbsplit):
feed_sub = dict()
for k in feed_dict.keys():
feed_sub[k] = feed_dict[k][perm[split[i]]] if random else feed_dict[k][split[i]]
batches.append(feed_sub)
if residual > 0:
st, sz = data_num_pts - residual, residual - (residual % num_tower)
feed_sub = dict()
for k in feed_dict.keys():
feed_sub[k] = feed_dict[k][perm[np.arange(st, st + sz)]] if random else feed_dict[k][np.arange(st, st + sz)]
batches.append(feed_sub)
return batches
def _evaluate_nn_model(LB, FT, DG, train_sub, test_sub, model, optim_parameters, verbose=True):
num_tower, tower_type, num_epochs, decay, learning_rate, tower_size, optimizer, balanced = optim_parameters["num_tower"], optim_parameters["tower_type"], optim_parameters["num_epochs"], optim_parameters["decay"], optim_parameters["learning_rate"], optim_parameters["tower_size"], optim_parameters["optimizer"], optim_parameters["balanced"]
tf.reset_default_graph()
with tf.device("/cpu:0"):
num_pts, num_labels, num_features, num_filt = LB.shape[0], LB.shape[1], FT.shape[1], len(DG)
# Neural network input
indxs = tf.placeholder(shape=[None, 1], dtype=tf.int32)
label = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
feats = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
diags = [tf.placeholder(shape=[None, DG[dt].shape[1], DG[dt].shape[2]], dtype=tf.float32) for dt in range(num_filt)]
# Optimizer
gs = tf.Variable(0, trainable=False)
if decay > 0:
decay_steps, decay_rate, staircase = optim_parameters["decay_steps"], optim_parameters["decay_rate"], optim_parameters["staircase"]
lr = tf.train.exponential_decay(learning_rate=learning_rate, global_step=gs, decay_steps=decay_steps, decay_rate=decay_rate, staircase=staircase)
else:
lr = learning_rate
if optimizer == "adam":
epsilon = optim_parameters["epsilon"]
opt = tf.train.AdamOptimizer(learning_rate=lr, epsilon=epsilon)
elif optimizer == "gradient_descent":
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
elif optimizer == "rmsprop":
opt = tf.train.RMSPropOptimizer(learning_rate=lr)
sp_indxs = tf.split(indxs, num_or_size_splits=num_tower, axis=0)
sp_label = tf.split(label, num_or_size_splits=num_tower, axis=0)
sp_feats = tf.split(feats, num_or_size_splits=num_tower, axis=0)
sp_diags = [tf.split(diags[dt], num_or_size_splits=num_tower, axis=0) for dt in range(num_filt)]
# Neural network is built by placing a graph on each computing unit (tower)
# Calculate the gradients for each model tower
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
accuracy = 0
for i in xrange(num_tower):
with tf.device("/" + tower_type + ":" + str(i)):
with tf.name_scope("tower_" + str(i)): # as scope:
# Get split corresponding to tower
tow_indxs, tow_label, tow_feats, tow_diags = sp_indxs[i], sp_label[i], sp_feats[i], [
sp_diags[dt][i] for dt in range(num_filt)]
# Apply model
representations, tow_logit = model.instance(tow_indxs, tow_feats, tow_diags)
# Compute train loss and accuracy on this tower
tow_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(tow_logit, 1), tf.argmax(tow_label, 1)), dtype=tf.float32))
tow_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tow_label, logits=tow_logit))
# for v in tf.trainable_variables():
# tow_loss += tf.nn.l2_loss(v)
accuracy += tow_acc * (1 / num_tower)
tf.get_variable_scope().reuse_variables()
# Calculate the gradients for the batch of data on this tower
grads = opt.compute_gradients(tow_loss)
# Keep track of the gradients across all towers
tower_grads.append(grads)
# Calculate the mean of each gradient, this is the synchronization point across all towers
grads = []
# Each grad_and_vars looks like the following: ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
for grad_and_vars in zip(*tower_grads):
gr = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below
gr.append(expanded_g)
# Average over the 'tower' dimension
grad = tf.reduce_mean(tf.concat(axis=0, values=gr), 0)
# Keep in mind that the Variables are redundant because they are shared across towers,
# so we just return the first tower's pointer to the Variable
grads.append((grad, grad_and_vars[0][1]))
# Apply the gradients to adjust the shared variables
apply_gradient_op = opt.apply_gradients(grads, global_step=None)
increase_global_step = gs.assign_add(1)
model_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
is_training = tf.get_variable("is_training", shape=(), dtype=tf.bool,
initializer=tf.constant_initializer(True, dtype=tf.bool))
# Create EMA object and update internal variables after optimization step
ema = tf.train.ExponentialMovingAverage(decay=decay)
with tf.control_dependencies([apply_gradient_op]):
train_op = ema.apply(model_vars)
# Create backup for trainable variables
with tf.variable_scope("BackupVariables"):
backup_vars = [tf.get_variable(var.op.name, dtype=var.value().dtype, trainable=False,
initializer=var.initialized_value()) for var in model_vars]
def to_training():
tf.assign(is_training, True)
return tf.group(*(tf.assign(var, bck.read_value()) for var, bck in zip(model_vars, backup_vars)))
def to_testing():
tf.assign(is_training, False)
tf.group(*(tf.assign(bck, var.read_value()) for var, bck in zip(model_vars, backup_vars)))
return tf.group(*(tf.assign(var, ema.average(var).read_value()) for var in model_vars))
switch_to_train_mode_op = tf.cond(is_training, true_fn=lambda: tf.group(), false_fn=to_training)
switch_to_test_mode_op = tf.cond(is_training, true_fn=to_testing, false_fn=lambda: tf.group())
# Create train and test indices
train_sub = train_sub[:len(train_sub) - (len(train_sub) % num_tower)]
test_sub = test_sub[:len(test_sub) - (len(test_sub) % num_tower)]
train_num_pts, test_num_pts, = len(train_sub), len(test_sub)
# Create train and test input dictionaries for Tensorflow
feed_train, feed_test = dict(), dict()
feed_train[indxs], feed_test[indxs] = train_sub[:, np.newaxis], test_sub[:, np.newaxis]
feed_train[label], feed_test[label] = LB[train_sub, :], LB[test_sub, :]
feed_train[feats], feed_test[feats] = FT[train_sub, :], FT[test_sub, :]
for dt in range(num_filt):
feed_train[diags[dt]], feed_test[diags[dt]] = DG[dt][train_sub, :], DG[dt][test_sub, :]
# Create test batches
train_batches_eval = _create_batches(train_sub, feed_train, num_tower, tower_size, False, False)
test_batches = _create_batches(test_sub, feed_test, num_tower, tower_size, False, False)
# Build an initialization operation to run below
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to True to build towers on GPU, since some of the ops do not have GPU implementations.
# For GPU debugging, one may want to add in ConfigProto arguments: log_device_placement=True
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# Initialize parameters
sess.run(init)
sess.run(switch_to_train_mode_op)
weights, times = [[] for _ in range(model.num_filts)], []
perslay_parameters = model.get_parameters()[1]
if not model.get_parameters()[2]:
for nf in range(model.num_filts):
weight_fun = perslay_parameters["persistence_weight"] if type(perslay_parameters) == dict else perslay_parameters[nf]["persistence_weight"]
if weight_fun == "grid":
weights[nf].append(np.flip(sess.run(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "perslay-" + str(nf) + "-grid_pweight/W")[0]).T, 0))
if weight_fun == "gmix":
means = sess.run(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "perslay-" + str(nf) + "-gmix_pweight/M")[0])
varis = sess.run(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "perslay-" + str(nf) + "-gmix_pweight/V")[0])
weights[nf].append((means,varis))
list_train_accs, list_test_accs = [], []
# Training with optimization of parameters
for epoch in xrange(num_epochs):
# Create random train batches
if balanced:
train_batches = _create_batches(train_sub, feed_train, num_tower, tower_size, True, True, LB[train_sub, :])
else:
train_batches = _create_batches(train_sub, feed_train, num_tower, tower_size, True, False, LB[train_sub, :])
# Apply gradient descent
for feed_batch in train_batches:
sess.run(train_op, feed_dict=feed_batch)
sess.run(increase_global_step)
if not model.get_parameters()[2]:
# Retrieve weight matrices
for nf in range(model.num_filts):
weight_fun = perslay_parameters["persistence_weight"] if type(perslay_parameters) == dict else perslay_parameters[nf]["persistence_weight"]
if weight_fun == "grid":
weights[nf].append(np.flip(sess.run(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "perslay-" + str(nf) + "-grid_pweight/W")[0]).T, 0))
if weight_fun == "gmix":
means = sess.run(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "perslay-" + str(nf) + "-gmix_pweight/M")[0])
varis = sess.run(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "perslay-" + str(nf) + "-gmix_pweight/V")[0])
weights[nf].append((means,varis))
# Retrieve times
if model.get_parameters()[0]["learn"]:
times.append(np.array(sess.run(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "times")[0])))
# Switch to test mode and evaluate train and test accuracy
sess.run(switch_to_test_mode_op)
train_acc, test_acc = 0, 0
for feed_batch in train_batches_eval:
train_acc += 100 * accuracy.eval(feed_dict=feed_batch) * (feed_batch[label].shape[0] / train_num_pts)
for feed_batch in test_batches:
test_acc += 100 * accuracy.eval(feed_dict=feed_batch) * (feed_batch[label].shape[0] / test_num_pts)
if (epoch+1) % 10 == 0 and verbose:
print("Epoch: {:d}, train acc: {:04.1f}, test acc: {:04.1f}".format(epoch+1, train_acc, test_acc))
list_train_accs.append(train_acc)
list_test_accs.append(test_acc)
# Go back to train mode
sess.run(switch_to_train_mode_op)
tr_repres = (representations.eval(feed_dict=feed_train), feed_train[label])
te_repres = (representations.eval(feed_dict=feed_test), feed_test[label])
if model.get_parameters()[0]["learn"] and type(perslay_parameters) == dict:
times = np.concatenate(times, axis=1)
return list_train_accs, list_test_accs, weights, times, [tr_repres, te_repres]
def perform_expe(num_runs=1, path_dataset=None, dataset="custom",
model=None, diags=[np.empty([0,0,0])], feats=np.empty([0,0]), labels=np.empty([0,0]),
optim_parameters={}, perslay_cv=10, standard_model=False, standard_parameters=[], standard_cv=10, verbose=True):
if path_dataset is not None:
path_dataset = "./data/" + dataset + "/" if not len(path_dataset) else path_dataset
filepath = path_dataset + dataset + ".conf"
dataset_type, filt_parameters, perslay_parameters, combs, optim_parameters = load_config(filepath=filepath)
thresh = filt_parameters["pad"]
diag, feats, labels = load_diagfeatlabels(dataset, path_dataset=path_dataset, verbose=verbose)
diags, _ = preprocess(diag, thresh=thresh)
if type(filt_parameters) is not list and type(perslay_parameters) is not list:
model = baseModel(filt_parameters, perslay_parameters, labels, combination=combs)
else:
model = []
list_filt = filt_parameters if type(filt_parameters) == list else [filt_parameters]
list_pers = perslay_parameters if type(perslay_parameters) == list else [perslay_parameters]
list_comb = combs if type(perslay_parameters) == list else [combs]
for fi in list_filt:
for idx, pe in enumerate(list_pers):
model.append(baseModel(fi, pe, labels, combination=list_comb[idx]))
if type(optim_parameters) is not list:
mode, num_folds, num_epochs = optim_parameters["mode"], optim_parameters["folds"], optim_parameters["num_epochs"]
else:
mode, num_folds, num_epochs = optim_parameters[0]["mode"], optim_parameters[0]["folds"], optim_parameters[0]["num_epochs"]
# Train and test data.
train_accs_res = np.zeros([num_runs, num_folds, num_epochs]) if not standard_model else np.zeros([num_runs, num_folds, num_epochs+1])
test_accs_res = np.zeros([num_runs, num_folds, num_epochs]) if not standard_model else np.zeros([num_runs, num_folds, num_epochs+1])
for idx_score in range(num_runs):
print("Run number %i" % (idx_score+1))
print("*************")
if mode == "KF": # Evaluation with k-fold on test set
folds = KFold(n_splits=num_folds, random_state=idx_score, shuffle=True).split(np.empty([feats.shape[0]]))
if mode == "RP": # Evaluation with random test set
test_size = optim_parameters["test_size"] if type(optim_parameters) is not list else optim_parameters[0]["test_size"]
folds = ShuffleSplit(n_splits=num_folds, test_size=test_size, random_state=idx_score).split(np.empty([feats.shape[0]]))
for idx, (train_sub, test_sub) in enumerate(folds):
print("Run number %i -- fold %i" % (idx_score+1, idx+1))
print(str(len(train_sub)) + " train points and " + str(len(test_sub)) + " test points")
# Evaluation of neural network
if type(model) is not list and type(optim_parameters) is not list:
best_model, best_optim = model, optim_parameters
else:
list_model = model if type(model) == list else [model]
list_optim = optim_parameters if type(optim_parameters) == list else [optim_parameters]
best_model, best_avg, best_optim = list_model[0], 0., list_optim[0]
for mdl in list_model:
for opt in list_optim:
avg_acc = 0.
folds_inner = KFold(n_splits=perslay_cv, random_state=idx+1, shuffle=True).split(np.empty([len(train_sub)]))
for _, (train_param, valid_param) in enumerate(folds_inner):
_, te, _, _, _ = _evaluate_nn_model(labels, feats, diags, train_sub[train_param], train_sub[valid_param], mdl, opt, verbose=False)
avg_acc += te[-1] / perslay_cv
if avg_acc > best_avg:
best_model, best_avg, best_optim = mdl, avg_acc, opt
ltrain, ltest, _, _, vecs = _evaluate_nn_model(labels, feats, diags, train_sub, test_sub, best_model, best_optim, verbose)
if standard_model:
tr_vectors, te_vectors = vecs[0][0], vecs[1][0]
tr_labels, te_labels = np.array([np.where(vecs[0][1][i,:]==1)[0][0] for i in range(len(tr_vectors))]), np.array([np.where(vecs[1][1][i,:]==1)[0][0] for i in range(len(te_vectors))])
pipe = Pipeline([("Estimator", SVC())])
std_model = GridSearchCV(pipe, standard_parameters, cv=standard_cv)
std_model = std_model.fit(tr_vectors, tr_labels)
ltrain.append(100 * std_model.score(tr_vectors, tr_labels))
ltest.append(100 * std_model.score(te_vectors, te_labels))
train_accs_res[idx_score, idx, :] = np.array(ltrain)
test_accs_res[idx_score, idx, :] = np.array(ltest)
filt_print = [m.get_parameters()[0] for m in model] if type(model) == list else model.get_parameters()[0]
pers_print = [m.get_parameters()[1] for m in model] if type(model) == list else model.get_parameters()[1]
comb_print = [m.get_parameters()[2] for m in model] if type(model) == list else model.get_parameters()[2]
output = "./" if path_dataset is None else path_dataset
with open(output + "summary.txt", "w") as text_file:
text_file.write("DATASET: " + dataset + "\n")
text_file.write(str(datetime.datetime.now()) + "\n\n")
text_file.write("****** " + str(num_runs) + " RUNS SUMMARY ******\n")
text_file.write("Mode: " + mode + ", number of folds: " + str(num_folds) + "\n")
text_file.write("Filtrations parameters: " + str(filt_print) + "\n")
text_file.write("PersLay parameters: " + str(pers_print) + "\n")
text_file.write("Linear combinations: " + str(comb_print) + "\n")
text_file.write("Optimization parameters: " + str(optim_parameters) + "\n")
if standard_model:
text_file.write("Standard classifiers: " + str(standard_parameters) + "\n")
folders_means = np.mean(test_accs_res, axis=1)
overall_best_epoch = np.argmax(np.mean(folders_means, axis=0))
final_means = folders_means[:, -1]
best_means = folders_means[:, overall_best_epoch]
text_file.write("Mean: " + str(np.round(np.mean(final_means), 2)) + "% +/- " + str(np.round(np.std(final_means), 2)) + "%\n")
text_file.write("Best mean: " + str(np.round(np.mean(best_means), 2)) + "% +/- " + str(np.round(np.std(best_means), 2)) + "%, reached at epoch " + str(overall_best_epoch + 1))
print("Mean: " + str(np.round(np.mean(final_means), 2)) + "% +/- " + str(np.round(np.std(final_means), 2)) + "%")
print("Best mean: " + str(np.round(np.mean(best_means), 2)) + "% +/- " + str(np.round(np.std(best_means), 2)) + "%, reached at epoch " + str(overall_best_epoch + 1))
np.save(output + "train_accs.npy", train_accs_res)
np.save(output + "test_accs.npy", train_accs_res)
return
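# A minimal usage sketch, assuming a dataset folder laid out as ./data/<name>/<name>.conf plus the generated
# .hdf5 and .csv files that the helpers above expect; the dataset name "MUTAG" is only an illustrative placeholder.
# generate_diag_and_features("MUTAG")
# diag, feats, labels = load_diagfeatlabels("MUTAG", verbose=True)
# perform_expe(num_runs=1, path_dataset="", dataset="MUTAG")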
def single_run(test_size, path_dataset=None, dataset="custom",
model=None, diags=[np.empty([0,0,0])],  # api: numpy.empty
import superimport
import random
import numpy as np
import matplotlib.pyplot as plt
from numpy import transpose
from numpy.random import default_rng
from math import exp, sqrt, sin, pi, cos
import pyprobml_utils as pml
def gpKernelPlot(seed):
if seed == 1:
return
X = np.array([1, 2, 3])
X_t = np.atleast_2d(X).transpose()
X = X_t * 2
y = np.array([1, 2, 4])
y = np.atleast_2d(y).transpose()
y = y - np.mean(y)  # api: numpy.mean
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
from ..common.toolbox import embedding,correlation,decide_dim
from ..common.surrogate import twin_surrogate
from ..common.distance import calic_dist_l2
import tensorflow as tf
CCM_PARAM = {
"save_path": "./",
"emb_dim": 5,
"discard": 10,
}
def set_params(**kwargs):
if "save" in kwargs:
CCM_PARAM["save_path"] = kwargs["save"]
if "discard" in kwargs:
CCM_PARAM["discard"] = kwargs["discard"]
def xmap(x, k_dist, k_idx,emb_dim,tau,eps = 1e-5):
length = k_dist.shape[0]
x_tilde = np.empty((length))
for i in range(length):
u = np.exp(-k_dist[i, :] / (k_dist[i, 0] +eps))
w = u / np.sum(u)
x_tilde[i] = np.sum(w * x[ k_idx[i, :] +(emb_dim -1) * tau])
return x_tilde
def estimate(x, y, length=None, emb_dim=None,tau = 1, plot=False):
"""
Estimate the time series x from the reconstructed state space (shadow manifold) of y.
length: the number of points to estimate; x[:length] is reconstructed.
discard: number of initial points to discard.
Note: discard some of the initial points when the data come from a dynamical system such as
the logistic map, since the initial transient should not be used.
:return: rho (correlation between x and its estimate) and the estimated x
"""
x = np.array(x)
y = np.array(y)
if not emb_dim:
emb_dim = CCM_PARAM["emb_dim"]
emb = embedding(y, emb_dim,tau=tau)
if not length:
length = emb.shape[0]
emb = emb[:length]
rho, x_tilde = estimate_from_emb(x[:length+(emb_dim -1 ) * tau], emb,tau, plot=plot)
return rho, x_tilde
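# Illustrative usage with a pair of coupled logistic maps (the coupling constants follow the standard CCM
# example and are arbitrary choices here):
# x, y = np.zeros(1000), np.zeros(1000)
# x[0], y[0] = 0.2, 0.4
# for t in range(999):
#     x[t + 1] = x[t] * (3.8 - 3.8 * x[t] - 0.02 * y[t])
#     y[t + 1] = y[t] * (3.5 - 3.5 * y[t] - 0.1 * x[t])
# rho, x_tilde = estimate(x[100:], y[100:], emb_dim=2)  # drop the transient, then cross map y -> x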
def estimate_from_emb(x, emb, tau = 1, plot=False):
length = emb.shape[0]
emb_dim = emb.shape[1]
dist_arr, dist_idx = calic_all_dist(emb)
k_dist, k_idx = k_nearest(dist_arr, dist_idx, length, emb_dim + 1)
x_tilde = xmap(x, k_dist, k_idx,emb_dim,tau)
if plot:
plt.scatter(x[(emb_dim-1)*tau:], x_tilde)
plt.show()
rho = correlation(x[(emb_dim-1)*tau:], x_tilde)
return rho, x_tilde
def estimate_using_bootstrap(x,y, length="auto", emb_dim=5,tau = 1):
"""
Estimate x from the reconstructed state space of y to judge whether x causes y (x -> y).
:param x:
:param y:
:param length:
:param emb_dim:
:param tau:
:return:
"""
emb_y = embedding(y,emb_dim,tau)
max_length = len(emb_y)
if length =="auto":
length = max_length
rho, x_tilde = estimate_from_emb_random(x,emb_y,length,max_length,emb_dim,tau)
return rho, x_tilde
def estimate_from_emb_random(x,emb_y,length,max_length,emb_dim,tau):
idxs = np.random.choice(np.arange(max_length), length, replace=False)
y_selected = emb_y[idxs]
x_selected = x[idxs + (emb_dim - 1) * tau]
padding = np.empty((emb_dim - 1) * tau)
x = np.concatenate([padding, x_selected])
rho, x_tilde = estimate_from_emb(x, y_selected, tau)
return rho, x_tilde
def convergent(x, y, start = 0, length=None, emb_dim=None, tau = 1, min_length=None, estimation_freq=1,option = "linear"):
"""
Check whether rho increases as more of the time series is used, working on x[start:start+length].
:param x:
:param y:
:param start:
:param length:
:param emb_dim:
:param min_length:
:return: rho_array
"""
x = np.array(x)
y = np.array(y)  # api: numpy.array
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
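# For instance, types.is_categorical_dtype(pd.Series(dtype="category")) and
# types.is_categorical_dtype(cudf.CategoricalDtype("a")) are both expected to be True,
# while numeric objects such as np.int_() are not.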
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
        (np.datetime64(), False),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
class LogisticRegression(object):
def __init__(self):
# Epsilon factor to prevent division by zero.
self._epsilon = 1e-7
# Other general class values which need to be initialized.
self._weights = None
self.data = None
self._is_data_intercepted = False
def __len__(self):
# Return the length of the weight array.
if self._weights is None:
raise NotImplementedError(
"You need to fit the logistic regression to training data "
"to initialize the weights and therefore give them a length.")
else:
return self._weights.shape[0]
def __str__(self):
# Return the class weights.
return " ".join(f"{item:.2f}" for item in self._weights)
@property
def epsilon(self):
"""Method to view class epsilon value."""
return self._epsilon
@epsilon.setter
def epsilon(self, value):
"""Method to set class epsilon value (cannot be done on __init__)."""
self._epsilon = value
@property
def weights(self):
"""Method to view class weights values."""
return self._weights
@weights.setter
def weights(self, weights):
"""A method to set the class weights (if you already have them)."""
self._weights = weights
@staticmethod
def _assert_sizes(*args):
"""Ensure that input arrays have the same shape/length."""
default_length = len(args[0])
for value in args:
if len(value) != default_length:
raise ValueError(f"The inputted arrays have different lengths, "
f"e.g. {len(value)} and {default_length}.")
@staticmethod
def _convert_to_numpy(*args):
"""Internal decorator, converts inputted arguments to a usable NumPy array format."""
converted_args = []
for item in args:
if isinstance(item, np.ndarray):
converted_args.append(item)
elif isinstance(item, (int, float)):
converted_args.append(np.atleast_1d(item))
else:
converted_args.append(np.array(item))
return converted_args
def sigmoid(self, z):
"""A convenience internal method to return sigmoid(X)."""
return 1 / (1 + np.exp(self.epsilon - z))
def binary_crossentropy(self, y_pred, y_true):
"""A convenience internal method to return the binary crossentropy
loss between predicted/true values (during gradient descent)."""
y_true, y_pred = self._convert_to_numpy(y_true, y_pred)
self._assert_sizes(y_true, y_pred)
# Clip to the epsilon value.
y_pred = np.clip(y_pred, self.epsilon, 1. - self.epsilon)
# Get the batch size and return the actual loss.
batch_size = y_pred.shape[0]
        return -np.sum(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) / batch_size
def generate_scatter_data(self, amount, seed = None):
"""Generates scatter data for the logistic regression algorithm (and plotting)."""
# Set a specific seed to keep generating the same data.
if seed is not None:
try: # Set the actual seed.
np.random.seed(seed)
except Exception as e:
# Except invalid types or values.
raise e
else:
# There is no seed necessary
np.random.seed(None)
# Create multivariate normal distribution values.
choices1 = np.random.randint(0, 5, size = (2, ))
choices2 = np.random.randint(6, 10, size = (2, ))
# Create the data for two different classes.
class1 = np.random.multivariate_normal(
[choices1[0], choices1[1]], [[1, .75], [.75, 1]], amount)
label1 = np.zeros(amount)
class2 = np.random.multivariate_normal(
[choices2[0], choices2[1]], [[1, .75], [.75, 1]], amount)
label2 = np.ones(amount)
# Create stacked data and labels.
features = np.vstack((class1, class2)).astype(np.float32)
labels = np.hstack((label1, label2))
# Set the data to the class.
self.data = (features, labels)
# Return the features and labels.
return features, labels
def plot_scatter_data(self, *args):
"""Creates a two-dimensional scatter plot of data points."""
args = self._convert_to_numpy(*args)
        self._assert_sizes(*args)
# For binary logistic regression, there should only be two classes.
if len(args) != 2:
# In this case, we have received multiple arrays that are incorrect.
raise ValueError(
f"Expected only two arrays, one containing data and one containing "
f"labels, instead received {len(args)} arrays. ")
# Validate the rest of the data features.
if np.size(args[0], -1) != 2:
raise ValueError(
f"Expected a two-dimensional data array, instead got {np.size(args[0], -1)}.")
        if not np.array_equal(args[1], args[1].astype(bool)):
raise ValueError(
"The label array should be binary (0s and 1s), instead got multiple classes.")
# Plot the data.
plt.figure(figsize = (12, 8))
plt.scatter(args[0][:, 0], args[0][:, 1], c = args[1], alpha = 0.4)
# Display the plot.
plt.show()
# Convenience return for stacked method calls.
return self
def _gather_data(self, X = None, y = None):
"""Gets the data from provided values and returns it."""
# Cases to determine the data being used.
if self.data is not None:
# The class has data saved already (from generate_scatter_data, etc.)
X, y = self.data[0], self.data[1]
elif isinstance(X, tuple) and y is None:
# There may be a single tuple containing both X and y.
X, y = X[0], X[1]
else:
# Data is just inputted as normal.
X, y = X, y
# Set the data to the class.
self.data = (X, y)
# Return the X and y values.
return X, y
def _initialize_weights(self, X):
"""Initializes the class weights (or re-initializes them for new data)."""
del self._weights
self._weights = np.zeros(X.shape[1])
def _update_weights(self, X, y_pred, y_true, lr, l2_coef):
"""Updates the class weights for logistic regression."""
# Calculate the gradient between the expected and predictions.
discrepancy = y_true - y_pred
grad = np.dot(X.T, discrepancy)
# Apply regularization if a coefficient is provided.
if l2_coef is not None:
grad = l2_coef * grad + np.sum(self._weights)
# Update the class weights.
self._weights += grad * lr
def fit(self, X = None, y = None, epochs = 10000, lr = 0.001, verbose = True,
add_intercept = True, l2_coef = 0.5, override_callback = False):
"""Fits the logistic regression algorithm to the provided data and labels."""
# Dispatch to gather data.
X, y = self._gather_data(X, y)
# Add an intercept value (for smoothing data).
if add_intercept:
intercept = np.ones((X.shape[0], 1))
X = np.hstack((intercept, X))
# Tracker for evaluation method.
self._is_data_intercepted = True
# Dispatch to initialize weights.
self._initialize_weights(X)
# Create a loss tracker for early stopping.
loss_tracker = []
# Iterate over each epoch.
for epoch in range(epochs):
# Calculate the current prediction.
predictions = self.sigmoid(np.dot(X, self._weights))
# A default early stopping criterion, stops training if nothing improves
# or if the loss actually starts to go up instead of down.
if not override_callback:
if len(loss_tracker) < (epochs // 10):
loss_tracker.append(self.binary_crossentropy(predictions, y))
else:
loss_tracker = loss_tracker[1:]
loss_tracker.append(self.binary_crossentropy(predictions, y))
# Determine if the loss is not changing.
if np.all(np.isclose(loss_tracker, loss_tracker[0])):
print(f"Stopping training early because loss is not decreasing. "
f"Final Loss: {self.binary_crossentropy(predictions, y)}")
break
# Determine if the loss is actually going up.
                    if len(loss_tracker) > 1 and np.all(np.diff(np.array(loss_tracker)) > 0):
print(f"Stopping training early because loss is increasing. "
f"Final Loss: {self.binary_crossentropy(predictions, y)}")
break
# Dispatch to weight updating method.
self._update_weights(X, predictions, y, lr = lr, l2_coef = l2_coef)
# Print out the binary-crossentropy loss if necessary.
if verbose:
if epoch % (epochs / 20) == 0:
print(f"Epoch {epoch}\t Loss: {self.binary_crossentropy(predictions, y)}")
# Convenience return for stacked method calls.
return self
def evaluate(self, X = None, y = None):
"""Evaluates the logistic regression algorithm on the data (accuracy and loss)."""
# Cases to determine the data being used.
X, y = self._gather_data(X, y)
# Determine if there is an intercept necessary.
if self._is_data_intercepted:
intercept = np.ones((X.shape[0], 1))
X = np.hstack((intercept, X))
# Calculate the predictions.
total_predictions = np.round(self.sigmoid(np.dot(X, self._weights)))
# Calculate the accuracy and loss.
        accuracy = np.sum(total_predictions == y).astype(float) / len(total_predictions)
loss = self.binary_crossentropy(total_predictions, y)
# Print out the accuracy.
print(f"Accuracy: {accuracy * 100:.2f}%\t Loss: {loss}")
# Convenience return for stacked method calls.
return self
def predict(self, X):
"""Predicts the class of a piece of data with the trained algorithm."""
if self.data is None:
raise ValueError("You need to fit the algorithm to data before trying to predict.")
if len(X.shape) == 1:
# A single piece of data has been passed.
return self.sigmoid(np.dot(X, self._weights))
else:
# Otherwise, we need to iterate over all of the passed items.
if self._is_data_intercepted:
# Determine if there is an intercept necessary.
                intercept = np.ones((X.shape[0], 1))
                X = np.hstack((intercept, X))
            return self.sigmoid(np.dot(X, self._weights))
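# --- Usage sketch (added for illustration; not part of the original script) ---
# A minimal end-to-end run of the class above on synthetic data. The sample
# size, seed and training settings below are arbitrary example values.
if __name__ == "__main__":
    model = LogisticRegression()
    features, labels = model.generate_scatter_data(100, seed = 42)
    model.fit(features, labels, epochs = 5000, lr = 0.001, verbose = False)
    model.evaluate()
    print("Prediction for the first point:", model.predict(features)[0])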
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import unittest
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
import pyarrow as pa
from pyarrow.compat import guid
from pyarrow.feather import (read_feather, write_feather,
FeatherReader)
from pyarrow.lib import FeatherWriter
def random_path():
return 'feather_{}'.format(guid())
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with self.assertRaises(pa.ArrowIOError):
FeatherReader('test_invalid_file')
def _get_null_counts(self, path, columns=None):
reader = FeatherReader(path)
counts = []
for i in range(reader.num_columns):
col = reader.get_column(i)
if columns is None or col.name in columns:
counts.append(col.null_count)
return counts
def _check_pandas_roundtrip(self, df, expected=None, path=None,
columns=None, null_counts=None,
nthreads=1):
if path is None:
path = random_path()
self.test_files.append(path)
write_feather(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, nthreads=nthreads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
if null_counts is None:
null_counts = np.zeros(len(expected.columns))
np.testing.assert_array_equal(self._get_null_counts(path, columns),
null_counts)
def _assert_error_on_write(self, df, exc, path=None):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
self.test_files.append(path)
def f():
write_feather(df, path)
self.assertRaises(exc, f)
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
null_counts = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
null_counts.append(null_mask.sum())
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
assert_array_equal(self._get_null_counts(path), null_counts)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_platform_numpy_integers(self):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
path = random_path()
self.test_files.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
writer.write_array(name, values, null_mask)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
writer.close()
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
writer = FeatherWriter()
writer.open(path)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
writer.write_array('bools', values, mask)
expected = values.astype(object)
expected[mask] = None
writer.close()
ex_frame = pd.DataFrame({'bools': expected})
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_buffer_bounds_error(self):
# ARROW-1676
path = random_path()
self.test_files.append(path)
for i in range(16, 256):
values = pa.array([None] + list(range(i)), type=pa.float64())
writer = FeatherWriter()
writer.open(path)
writer.write_array('arr', values)
writer.close()
result = read_feather(path)
expected = pd.DataFrame({'arr': values.to_pandas()})
assert_frame_equal(result, expected)
self._check_pandas_roundtrip(expected, null_counts=[1])
def test_boolean_object_nulls(self):
repeats = 100
        arr = np.array([False, None, True] * repeats, dtype=object)
        df = pd.DataFrame({'bools': arr})
        self._check_pandas_roundtrip(df, null_counts=[1 * repeats])
"""
DQN-Wrappers-Env
@author: [<NAME>](https://github.com/luiz-resende)
@date: Created on Tue Oct 19, 2021
This script contains the necessary environment class wrappers to preprocess the
OpenAI-Gym and ALE Atari environments, as well as the MinAtar environment. The base
code was extracted from ``baselines.common.atari_wrappers`` and modified to
clean-up the code and include new methods and classes.
"""
from typing import Any, Dict, List, Optional, Tuple, Union
from collections import deque
import gym
from gym import spaces
from gym.wrappers import TimeLimit
import numpy as np
import cv2
cv2.ocl.setUseOpenCL(False)
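# Illustrative composition helper (added for clarity; not part of the original
# module). It shows the usual order in which the wrappers defined below are
# chained for an Atari DQN agent; the environment id is only an example.
def make_atari_example(env_id="BreakoutNoFrameskip-v4", noop_max=30, skip=4):
    """Sketch: NoopReset -> MaxAndSkip -> EpisodicLife (-> FireReset if the game needs it)."""
    env = gym.make(env_id)
    env = NoopResetEnv(env, noop_max=noop_max)
    env = MaxAndSkipEnv(env, skip=skip)
    env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    return env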
class NoopResetEnv(gym.Wrapper):
"""Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0."""
def __init__(self, env=None, noop_max=30):
super(NoopResetEnv, self).__init__(env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
"""Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
"""
Step observation in the environment.
Parameters
----------
ac : int
Action taken.
Returns
-------
numpy.ndarray
Next state given action chosen.
"""
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
"""Take action on reset for environments that are fixed until firing."""
def __init__(self, env=None):
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
"""
Reset environment to initial state.
Returns
-------
obs : numpy.ndarray
Initial state observation.
"""
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
"""
Step observation in the environment.
Parameters
----------
ac : int
Action taken.
Returns
-------
numpy.ndarray
Next state given action chosen.
"""
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
"""
Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
Parameters
----------
env : gym.envs.atari.environment.AtariEnv
Environment.
"""
def __init__(self, env=None):
super(EpisodicLifeEnv, self).__init__(env)
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
self.was_real_reset = False
def step(self, action):
"""
Step observation in the environment.
Parameters
----------
action : int
Action to be taken.
Returns
-------
numpy.ndarray
Next state given action chosen.
"""
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# Check current lives, make loss of life terminal then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if ((lives < self.lives) and (lives > 0)):
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""
Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if (self.was_real_done):
obs = self.env.reset(**kwargs)
self.was_real_reset = True
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.was_real_reset = False
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
"""
Return only every 'skip'^{th} frame
Parameters
----------
env : gym.envs.atari.environment.AtariEnv
Environment.
skip : int, optional
Number of frames to skip. The default is 4.
"""
def __init__(self, env=None, skip=4):
super(MaxAndSkipEnv, self).__init__(env)
# Most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
import os
import cv2
from base_camera import BaseCamera
import numpy as np
import switch
import datetime
import Kalman_filter
import PID
import time
import threading
import imutils
import robotLight
light = robotLight.RobotLight()
pid = PID.PID()
pid.SetKp(0.5)
pid.SetKd(0)
pid.SetKi(0)
CVRun = 1
linePos_1 = 440
linePos_2 = 380
lineColorSet = 255
frameRender = 1
findLineError = 20
colorUpper = np.array([44, 255, 255])
import numpy as np
import urllib
import os
import argparse
import sys
from sklearn.model_selection import train_test_split
from astroML.plotting import setup_text_plots
import empiriciSN
from MatchingLensGalaxies_utilities import *
from astropy.io import fits
import GCRCatalogs
import pandas as pd
from GCR import GCRQuery
sys.path.append('/global/homes/b/brycek/DC2/sims_GCRCatSimInterface/workspace/sed_cache/')
from SedFitter import sed_from_galacticus_mags
from lsst.sims.photUtils import Sed, Bandpass, BandpassDict
def get_sl2s_data():
filename = os.path.join(os.environ['TWINKLES_DIR'], 'data',
'SonnenfeldEtal2013_Table3.csv')
z = np.array([])
z_err = np.array([])
v_disp = np.array([])
v_disp_err = np.array([])
r_eff = np.array([])
r_eff_err = np.array([])
log_m = np.array([])
log_m_err = np.array([])
infile = open(filename, 'r')
inlines = infile.readlines()
for line1 in inlines:
if line1[0] == '#': continue
line = line1.split(',')
#Params
z = np.append(z, float(line[1]))
v_disp = np.append(v_disp, float(line[2]))
r_eff = np.append(r_eff, float(line[3]))
log_m = np.append(log_m, float(line[4]))
#Errors
z_err = np.append(z_err, float(line[5]))
v_disp_err = np.append(v_disp_err, float(line[6]))
r_eff_err = np.append(r_eff_err, float(line[7]))
log_m_err = np.append(log_m_err, float(line[8]))
#Build final arrays
X = np.vstack([z, v_disp, r_eff, log_m]).T
Xerr = np.zeros(X.shape + X.shape[-1:])
diag = np.arange(X.shape[-1])
Xerr[:, diag, diag] = np.vstack([z_err**2, v_disp_err**2,
r_eff_err**2, log_m_err**2]).T
return X, Xerr
#Write new conditioning function
def get_log_m(cond_indices, m_index, X, model_file, Xerr=None):
"""
Uses a subset of parameters in the given data to condition the
model and return a sample value for log(M/M_sun).
Parameters
----------
cond_indices: array_like
Array of indices indicating which parameters to use to
condition the model.
m_index: int
Index of log(M/M_sun) in the list of parameters that were used
to fit the model.
X: array_like, shape = (n < n_features,)
Input data.
Xerr: array_like, shape = (X.shape,) (optional)
Error on input data. If none, no error used to condition.
Returns
-------
log_m: float
Sample value of log(M/M_sun) taken from the conditioned model.
Notes
-----
The fit_params array specifies a list of indices to use to
condition the model. The model will be conditioned and then
a mass will be drawn from the conditioned model.
This is so that the mass can be used to find cosmoDC2 galaxies
to act as hosts for OM10 systems.
This does not make assumptions about what parameters are being
used in the model, but does assume that the model has been
fit already.
"""
if m_index in cond_indices:
raise ValueError("Cannot condition model on log(M/M_sun).")
cond_data = np.array([])
if Xerr is not None: cond_err = np.array([])
m_cond_idx = m_index
n_features = empiricist.XDGMM.mu.shape[1]
j = 0
for i in range(n_features):
if i in cond_indices:
cond_data = np.append(cond_data,X[j])
if Xerr is not None: cond_err = np.append(cond_err, Xerr[j])
j += 1
if i < m_index: m_cond_idx -= 1
else:
cond_data = np.append(cond_data,np.nan)
if Xerr is not None: cond_err = np.append(cond_err, 0.0)
if Xerr is not None:
cond_XDGMM = empiricist.XDGMM.condition(cond_data, cond_err)
else: cond_XDGMM = empiricist.XDGMM.condition(cond_data)
sample = cond_XDGMM.sample()
log_m = sample[0][m_cond_idx]
return log_m
def estimate_stellar_masses_om10():
# Instantiate an empiriciSN worker object:
empiricist = empiriciSN.Empiricist()
X, Xerr = get_sl2s_data()
# Load in cached om10 catalog
    filename = os.path.join(os.environ['TWINKLES_DIR'], 'data', 'om10_qso_mock.fits')
hdulist = fits.open(filename)
twinkles_lenses = hdulist[1].data
# Predict a mass for each galaxy:
np.random.seed(0)
cond_indices = np.array([0,1])
twinkles_log_m_1comp = np.array([])
model_file='demo_model.fit'
empiricist.fit_model(X, Xerr, filename = 'demo_model.fit', n_components=1)
twinkles_data = np.array([twinkles_lenses['ZLENS'], twinkles_lenses['VELDISP']]).T
for x in twinkles_data:
log_m = get_log_m(cond_indices, 2, x[cond_indices], model_file)
twinkles_log_m_1comp = np.append(twinkles_log_m_1comp,log_m)
return twinkles_lenses, log_m, twinkles_log_m_1comp
def get_catalog(catalog, twinkles_lenses, twinkles_log_m_1comp):
gcr_om10_match = []
err = 0
    np.random.seed(10)
# coding=UTF-8
import gc
import copy
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
import tensorflow as tf
import random
import os
from sklearn import preprocessing
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import deepwalk
from sklearn.decomposition import PCA
import argparse
import networkx as nx
# import node2vec
from openne import graph, node2vec, gf, lap, hope, sdne
import math
#droprate=0.4
def get_embedding(vectors: dict):
matrix = np.zeros((
12118,
len(list(vectors.values())[0])
))
print("axis 0:")
print(len(vectors))
print("axis 1:")
print(len(list(vectors.values())[0]))
for key, value in vectors.items():
matrix[int(key), :] = value
return matrix
def get_embedding_lap(vectors: dict):
matrix = np.zeros((
12118,
128
))
for key, value in vectors.items():
matrix[int(key), :] = value
return matrix
def processEmb(oldfile,newfile):
f = open(oldfile)
next(f)
for line in f:
f1 = open(newfile,'a+')
f1.write(line)
f1.close()
f.close()
def clearEmb(newfile):
f = open(newfile,'w')
f.truncate()
def Net2edgelist(gene_disease_matrix_net):
none_zero_position = np.where(np.triu(gene_disease_matrix_net) != 0)
none_zero_row_index = np.mat(none_zero_position[0],dtype=int).T
none_zero_col_index = np.mat(none_zero_position[1],dtype=int).T
none_zero_position = np.hstack((none_zero_row_index,none_zero_col_index))
none_zero_position = np.array(none_zero_position)
name = 'gene_disease.txt'
np.savetxt(name, none_zero_position,fmt="%d",delimiter=' ')
# Obtain the gene_disease embedding
def Get_embedding_Matrix_sdne(gene_disease_matrix_net):
Net2edgelist(gene_disease_matrix_net)
graph1 = graph.Graph()
graph1.read_edgelist("gene_disease.txt")
print(graph1)
_sdne=Get_sdne(graph1)
return _sdne
def Get_embedding_Matrix_gf(gene_disease_matrix_net):
Net2edgelist(gene_disease_matrix_net)
graph1 = graph.Graph()
graph1.read_edgelist("gene_disease.txt")
print(graph1)
_gf=Get_gf(graph1)
return _gf
def Get_embedding_Matrix_n2v(gene_disease_matrix_net):
Net2edgelist(gene_disease_matrix_net)
graph1 = graph.Graph()
graph1.read_edgelist("gene_disease.txt")
print(graph1)
_n2v=Get_n2v(graph1)
return _n2v
def Get_embedding_Matrix_dw(gene_disease_matrix_net):
Net2edgelist(gene_disease_matrix_net)
graph1 = graph.Graph()
graph1.read_edgelist("gene_disease.txt")
print(graph1)
_dw=Get_dw(graph1)
return _dw
def Get_embedding_Matrix_lap(gene_disease_matrix_net):
Net2edgelist(gene_disease_matrix_net)
graph1 = graph.Graph()
graph1.read_edgelist("gene_disease.txt")
print(graph1)
_lap=Get_lap(graph1)
return _lap
def Get_embedding_Matrix_hope(gene_disease_matrix_net):
Net2edgelist(gene_disease_matrix_net)
graph1 = graph.Graph()
graph1.read_edgelist("gene_disease.txt")
print(graph1)
_hope=Get_hope(graph1)
return _hope
def Get_sdne(graph1):
model = sdne.SDNE(graph1, [1000, 128])
return get_embedding(model.vectors)
def Get_n2v(graph1):
model = node2vec.Node2vec(graph=graph1, path_length=80, num_paths=10, dim=128)
n2v_vectors = get_embedding(model.vectors)
return n2v_vectors
def Get_dw(graph1):
model = node2vec.Node2vec(graph=graph1, path_length=80, num_paths=10, dim=128, dw=True)
n2v_vectors = get_embedding(model.vectors)
return n2v_vectors
def Get_gf(graph1):
model = gf.GraphFactorization(graph1)
return get_embedding(model.vectors)
def Get_lap(graph1):
model = lap.LaplacianEigenmaps(graph1)
return get_embedding_lap(model.vectors)
def Get_hope(graph1):
model = hope.HOPE(graph=graph1, d=128)
return get_embedding(model.vectors)
def get_gaussian_feature(A_B_matrix):
row=A_B_matrix.shape[0]
column=A_B_matrix.shape[1]
A_matrix=np.zeros((row,row))
for i in range(0,row):
for j in range(0,row):
A_matrix[i,j]=math.exp(-np.linalg.norm(np.array(A_B_matrix[i,:]-A_B_matrix[j,:]))**2)
B_matrix=np.zeros((column,column))
for i in range(0,column):
for j in range(0,column):
B_matrix[i,j]=math.exp(-np.linalg.norm(np.array(A_B_matrix[:,i]-A_B_matrix[:,j]))**2)
A_matrix=np.matrix(A_matrix)
B_matrix=np.matrix(B_matrix)
return A_matrix,B_matrix
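# Vectorized equivalent of get_gaussian_feature (added for illustration): it
# produces the same Gaussian interaction-profile similarity matrices as the
# nested Python loops above, which become slow for large association matrices.
def get_gaussian_feature_fast(A_B_matrix):
    M = np.asarray(A_B_matrix, dtype=float)
    row_sq = np.sum(M ** 2, axis=1, keepdims=True)
    A_matrix = np.exp(-(row_sq + row_sq.T - 2 * np.dot(M, M.T)))
    col_sq = np.sum(M ** 2, axis=0, keepdims=True)
    B_matrix = np.exp(-(col_sq.T + col_sq - 2 * np.dot(M.T, M)))
    return np.matrix(A_matrix), np.matrix(B_matrix)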
def make_prediction1(train_feature_matrix, train_label_vector,test_feature_matrix):
clf = RandomForestClassifier(random_state=1, n_estimators=200, oob_score=True, n_jobs=-1)
clf.fit(train_feature_matrix, train_label_vector)
predict_y_proba = np.array(clf.predict_proba(test_feature_matrix)[:, 1])
return predict_y_proba
def make_prediction2(train_feature_matrix, train_label_vector,test_feature_matrix):
clf = MLPClassifier(solver='adam',activation = 'relu',max_iter = 100,alpha = 1e-5,hidden_layer_sizes = (128,64),verbose = False,early_stopping=True)
clf.fit(train_feature_matrix, train_label_vector)
predict_y_proba = np.array(clf.predict_proba(test_feature_matrix)[:,1])
return predict_y_proba
def constructNet(gene_dis_matrix,dis_chemical_matrix,gene_chemical_matrix,gene_gene_matrix):
disease_matrix = np.matrix(np.zeros((dis_chemical_matrix.shape[0], dis_chemical_matrix.shape[0]), dtype=np.int8))
chemical_matrix = np.matrix(np.zeros((dis_chemical_matrix.shape[1], dis_chemical_matrix.shape[1]),dtype=np.int8))
mat1 = np.hstack((gene_gene_matrix,gene_chemical_matrix,gene_dis_matrix))
mat2 = np.hstack((gene_chemical_matrix.T,chemical_matrix,dis_chemical_matrix.T))
mat3 = np.hstack((gene_dis_matrix.T,dis_chemical_matrix,disease_matrix))
return np.vstack((mat1,mat2,mat3))
def calculate_metric_score(real_labels,predict_score):
# evaluate the prediction performance
precision, recall, pr_thresholds = precision_recall_curve(real_labels, predict_score)
aupr_score = auc(recall, precision)
all_F_measure = np.zeros(len(pr_thresholds))
for k in range(0, len(pr_thresholds)):
if (precision[k] + recall[k]) > 0:
all_F_measure[k] = 2 * precision[k] * recall[k] / (precision[k] + recall[k])
else:
all_F_measure[k] = 0
print("all_F_measure: ")
print(all_F_measure)
max_index = all_F_measure.argmax()
threshold = pr_thresholds[max_index]
fpr, tpr, auc_thresholds = roc_curve(real_labels, predict_score)
auc_score = auc(fpr, tpr)
    # Binarize the continuous scores at the F-measure-optimal threshold before
    # computing the thresholded classification metrics below.
    predict_binary = (np.array(predict_score) >= threshold).astype(int)
    f = f1_score(real_labels, predict_binary)
    print("F_measure:"+str(all_F_measure[max_index]))
    print("f-score:"+str(f))
    accuracy = accuracy_score(real_labels, predict_binary)
    precision = precision_score(real_labels, predict_binary)
    recall = recall_score(real_labels, predict_binary)
print('results for feature:' + 'weighted_scoring')
print( '************************AUC score:%.3f, AUPR score:%.3f, precision score:%.3f, recall score:%.3f, f score:%.3f,accuracy:%.3f************************' % (
auc_score, aupr_score, precision, recall, f, accuracy))
results = [auc_score, aupr_score, precision, recall, f, accuracy]
return results
def ensemble_scoring(test_label_vector, predict_y,predict_y_proba): # 计算3种集成方法的scores
AUPR = average_precision_score(test_label_vector, predict_y_proba)
AUC = roc_auc_score(test_label_vector, predict_y_proba)
MCC = matthews_corrcoef(test_label_vector, predict_y)
ACC = accuracy_score(test_label_vector, predict_y, normalize=True)
F1 = f1_score(test_label_vector, predict_y, average='binary')
REC = recall_score(test_label_vector, predict_y, average='binary')
PRE = precision_score(test_label_vector, predict_y, average='binary')
metric = np.array((AUPR, AUC, PRE, REC, ACC, MCC, F1))
return metric
def cross_validation_experiment(gene_dis_matrix,dis_chemical_matrix,gene_chemical_matrix,gene_gene_matrix,seed,ratio = 1):
none_zero_position = np.where(gene_dis_matrix != 0)
none_zero_row_index = none_zero_position[0]
none_zero_col_index = none_zero_position[1]
zero_position = np.where(gene_dis_matrix == 0)
zero_row_index = zero_position[0]
zero_col_index = zero_position[1]
random.seed(seed)
zero_random_index = random.sample(range(len(zero_row_index)), ratio * len(none_zero_row_index))
zero_row_index = zero_row_index[zero_random_index]
zero_col_index = zero_col_index[zero_random_index]
row_index = np.append(none_zero_row_index, zero_row_index)
    col_index = np.append(none_zero_col_index, zero_col_index)
import numpy as np
import pickle
import cvxopt
from cvxopt import matrix, solvers
import sklearn
from sklearn.metrics import classification_report, confusion_matrix , accuracy_score, mean_squared_error
from sklearn.model_selection import train_test_split
from python.svmutil import *
import time
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
path_train = sys.argv[1];
path_test = sys.argv[2];
binary = int(sys.argv[3]);
part = sys.argv[4];
train_array = np.genfromtxt(path_train,delimiter=',');
train_array_x = train_array[:,0:784]/255;
train_array_y = train_array[:,784].reshape(train_array.shape[0],1);
test_array = np.genfromtxt(path_test,delimiter=',');
test_array_x = test_array[:,0:784]/255;
test_array_y = test_array[:,784].reshape(test_array.shape[0],1);
def linear_kernel(X,y):
M = y * X;
return np.dot(M, M.T);
def gaussian_kernel(X, sigma=0.05):
m = X.shape[0];
X2 = np.sum(np.multiply(X, X),axis=1, keepdims=True);
K0 = X2 + X2.T - 2 * np.matmul(X, X.T)
return np.power(np.exp(-sigma),K0);
def gaussian_kernel_elem(X1,X2, sigma = 0.05):
return np.exp(-sigma * np.linalg.norm(X1-X2)**2)
class SVM(object):
def __init__(self, kernel=linear_kernel, C=1.0):
self.kernel = kernel;
self.C = C;
def fit(self, X, y):
m,n = X.shape;
if(self.kernel == linear_kernel):
P = matrix(self.kernel(X,y));
else:
K = gaussian_kernel(X);
P = matrix(y * y.T * K);
q = matrix(np.ones(m) * -1.0);
A = matrix(y,(1,m));
b = matrix(0.0);
G1 = np.eye(m) * -1.0;
G2 = np.eye(m);
G = matrix(np.vstack((G1, G2)));
h1 = np.zeros(m);
h2 = np.ones(m) * self.C;
h = matrix(np.hstack((h1, h2)));
solvers.options['show_progress'] = False
solution = solvers.qp(P, q, G, h, A, b);
alpha = np.ravel((solution['x'])).reshape(m,1);
self.supp_vec_flag = (alpha > 1e-5).ravel();
self.indices = np.arange(len(alpha))[self.supp_vec_flag];
self.alpha = alpha[self.supp_vec_flag];
self.supp_vec = X[self.supp_vec_flag];
self.supp_vec_y = y[self.supp_vec_flag];
if(self.kernel==linear_kernel):
self.w = np.sum(self.alpha * self.supp_vec * self.supp_vec_y, axis = 0, keepdims=True).T;
b1 = np.min(np.matmul(X[(y == 1).ravel()], self.w));
b2 = np.max(np.matmul(X[(y == -1).ravel()], self.w));
self.b = -(b1+b2)/2;
else:
self.w = None;
self.b = 0.0;
def predict(self, X):
if (self.kernel == linear_kernel):
return np.sign(np.dot(X, self.w) + self.b);
else:
m = X.shape[0];
pred = np.zeros(m);
for i in range(m):
temp = 0;
s=0;
#if(i%100==0):
#print(i)
for alpha, supp_vec, supp_vec_y in zip(self.alpha, self.supp_vec, self.supp_vec_y):
s += alpha * supp_vec_y * gaussian_kernel_elem(X[i],supp_vec);
pred[i] = s + self.b;
return np.sign(pred);
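# Usage sketch (added for illustration; the experiment script below performs the
# full binary and one-vs-one multi-class runs on the CSV data loaded above).
def run_svm_example(X_train, Y_train, X_test):
    # Y_train must be a column vector with entries in {-1.0, +1.0}; X_* are float arrays.
    clf = SVM(kernel=gaussian_kernel, C=1.0)
    clf.fit(X_train, Y_train)
    return clf.predict(X_test)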
if(binary == 0):
X_train_bin = train_array_x[(train_array_y==3).ravel() | (train_array_y==4).ravel()];
Y_train_bin = train_array_y[(train_array_y==3).ravel() | (train_array_y==4).ravel()];
Y_train_bin = -1.0 * (Y_train_bin==3) + 1.0 * (Y_train_bin==4);
X_test_bin = test_array_x[(test_array_y==3).ravel() | (test_array_y==4).ravel()];
Y_test_bin = test_array_y[(test_array_y==3).ravel() | (test_array_y==4).ravel()];
Y_test_bin = -1.0 * (Y_test_bin==3) + 1.0 * (Y_test_bin==4);
if(part == 'a'):
clf = SVM();
clf.fit(X_train_bin, Y_train_bin);
preds = clf.predict(X_test_bin);
print("Linear Score: ", round(accuracy_score(preds,Y_test_bin),5))
if(part == 'b'):
clf = SVM(kernel=gaussian_kernel);
clf.fit(X_train_bin, Y_train_bin);
preds = clf.predict(X_test_bin);
print("RBF Score: ", round(accuracy_score(preds,Y_test_bin),5))
if(part == 'c'):
model = svm_train(Y_train_bin.ravel(),X_train_bin, '-s 0 -t 0 -g 0.05 -q');
label_predict, accuracy, decision_values=svm_predict(Y_test_bin.ravel(),X_test_bin,model, '-q');
print("Linear Score: ", round(accuracy_score(label_predict,Y_test_bin),5))
model = svm_train(Y_train_bin.ravel(),X_train_bin, '-s 0 -t 2 -g 0.05 -q');
label_predict_g, accuracy_g, decision_values_g=svm_predict(Y_test_bin.ravel(),X_test_bin,model,'-q');
print("RBF Score: ", round(accuracy_score(label_predict_g,Y_test_bin),5))
else:
if(part == 'a'):
for i in range(10):
for j in range(i+1,10):
suffix = str(i) + "-_-" + str(j);
globals()["Xtrain_" + suffix] = train_array_x[(train_array_y==i).ravel() | (train_array_y==j).ravel()];
temp = train_array_y[(train_array_y==i).ravel() | (train_array_y==j).ravel()];
globals()["Ytrain_" + suffix] = -1.0 * (temp==i) + 1.0 * (temp==j);
globals()["clf_" + suffix] = SVM(kernel=gaussian_kernel);
globals()["clf_" + suffix].fit(globals()["Xtrain_" + suffix], globals()["Ytrain_" + suffix]);
preds_train = np.zeros((train_array_y.shape[0],10,10))
for i in range(10):
for j in range(i+1,10):
suffix = str(i) + "-_-" + str(j);
temp = globals()["clf_" + suffix].predict(train_array_x);
preds_train[:,i,j] = (temp-1)/(-2);
preds_train[:,j,i] = (temp+1)/2;
preds_train = np.abs(preds_train)
        preds_train = np.argmin(np.sum(preds_train, axis=1), axis=1)
import numpy as np
import matplotlib
matplotlib.use('agg')
from . import flags
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = "cpu"
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def merge_coresets(x_coresets, y_coresets):
merged_x, merged_y = x_coresets[0], y_coresets[0]
for i in range(1, len(x_coresets)):
merged_x = np.vstack((merged_x, x_coresets[i]))
merged_y = np.hstack((merged_y, y_coresets[i]))
return merged_x, merged_y
def get_coreset(x_coresets, y_coresets, single_head, coreset_size = 5000, gans = None, task_id=0):
if gans is not None:
if single_head:
merged_x, merged_y = gans[0].generate_samples(coreset_size, task_id)
for i in range(1, len(gans)):
new_x, new_y = gans[i].generate_samples(coreset_size, task_id)
merged_x = np.vstack((merged_x,new_x))
merged_y = np.hstack((merged_y,new_y))
return merged_x, merged_y
else:
return gans.generate_samples(coreset_size, task_id)[:coreset_size]
else:
if single_head:
return merge_coresets(x_coresets, y_coresets)
else:
return x_coresets, y_coresets
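# Shape sanity-check sketch for the coreset helpers above (added for
# illustration): merging coresets of 2 and 4 points with 3 features each
# yields a (6, 3) feature array and a (6,) label vector.
def _merge_coresets_example():
    x1, x2 = np.ones((2, 3)), np.zeros((4, 3))
    y1, y2 = np.array([0, 1]), np.array([1, 1, 0, 0])
    return merge_coresets([x1, x2], [y1, y2])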
def get_scores(model, x_trainsets, y_trainsets, x_testsets, y_testsets, no_epochs, single_head, x_coresets, y_coresets, batch_size=None, just_vanilla = False, gans = None, is_toy=False):
task_num = len(x_trainsets)
acc = []
if single_head:
if len(x_coresets) > 0 or gans is not None:
x_train, y_train = get_coreset(x_coresets, y_coresets, single_head, coreset_size = 6000, gans = gans, task_id=0)
bsize = x_train.shape[0] if (batch_size is None) else batch_size
x_train = torch.Tensor(x_train)
y_train = torch.Tensor(y_train)
model.train(x_train, y_train, 0, no_epochs, bsize)
# this is only for the toy dataset visualisation -- probability contour plots
if(is_toy):
for i in range(len(x_trainsets)):
head = 0 if single_head else i
x_train, y_train = x_trainsets[i], y_trainsets[i]
x_min, x_max = x_train[:, 0].min() - 1, x_train[:, 0].max() + 1
y_min, y_max = x_train[:, 1].min() - 1, x_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
whole_space_data = np.stack((xx.ravel(),yy.ravel()), axis=-1)
N = whole_space_data.shape[0]
bsize = N if (batch_size is None) else batch_size
total_batch = int(np.ceil(N * 1.0 / bsize))
# Loop over all batches
for j in range(total_batch):
start_ind = j*bsize
end_ind = np.min([(j+1)*bsize, N])
batch_x_train = torch.Tensor(whole_space_data[start_ind:end_ind, :]).to(device = device)
# these are model probabilities over different samples of weights from the posterior distribution
pred = model.prediction_prob(batch_x_train, head)
# this simply takes the mean over all the different outputs with respect to the weight samples
if not just_vanilla:
pred_mean = pred.mean(0)
else:
pred_mean = pred
prob_ones = pred_mean[:, 0]
half_curve = []
for ind in range(len(prob_ones)):
if(prob_ones[ind]<=0.51 and prob_ones[ind]>=0.5):
half_curve.append(whole_space_data[ind])
half_curve = np.asarray(half_curve)
onethird_curve = []
for ind in range(len(prob_ones)):
if(prob_ones[ind]<=0.34 and prob_ones[ind]>=0.3):
onethird_curve.append(whole_space_data[ind])
onethird_curve = np.asarray(onethird_curve)
ninety_curve = []
for ind in range(len(prob_ones)):
if(prob_ones[ind]>=0.9):
ninety_curve.append(whole_space_data[ind])
                ninety_curve = np.asarray(ninety_curve)
import numpy as np
from library import *
from datetime import datetime
from energy_evaluation import read_from_tags
## To reproduce Figures 6-9, do the following:
################# Step 1 ###############################
## First, run the classical optimization. For example:
def optimize_all(n=12,hz=0.1):
from optimization import optimize
for l in [0,1,2,3,4,5,6,7,8,9,10,15,20,25,30,35,40,45,50]:
for hx in [0.1,0.2,0.3,0.4,0.5,1.5]:
optimize(n,l,hx,hz,method='BFGS',gpu=True,jac=True)
## if jac=True, the gradient is computed analytically, in parallel. If gpu=True, then the optimization uses a gpu. (This requires cupy.) If you do not have a gpu, you should set gpu=False. You can play around with the different optimization methods in https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html. We usually used TNC, but BFGS may work well too. In optimization.py, change /your_directory/... to be the location where you want to save parameters. You will need to set up the directories before running the optimizer.
## if you want to impose cyclic permutation symmetry, then instead do
def optimize_all_symm(n=12,hz=0.1):
from optimization import optimize_symm
for l in [0,1,2,3,4,5,6,7,8,9,10,15,20,25,30,35,40,45,50]:
for hx in [0.1,0.2,0.3,0.4,0.5,1.5]:
optimize_symm(n,l,hx,hz,method='BFGS',gpu=True,jac=True)
## Same comments about gpu and jac as above.
## Make sure that your optimizers have (approximately) converged before continuing. For our work, we imposed permutation symmetry for the 20 qubit case above 10 layers.
############# Step 2 #########################
## Next, to compare the mitigation methods, submit ansatz circuits with the optimized parameters, in addition to \theta=0 circuits.
# use something like the following. It will need to be modified for your directory and whether you imposed permutation symmetry
def submit_saved_params(n,l,hx,backend_name,hz=0.1,rand_compile=True,noise_scale=1):
from energy_evaluation import submit_ising, submit_ising_symm
my_directory = '/your_directory/saved_parameters/'
if not hasattr(l,'__iter__'):
l = [l]
if not hasattr(hx,'__iter__'):
hx = [hx]
for li in l:
if n < 20 or (n == 20 and li < 10):
symm = False
base_dir = my_directory+'ising/ALAy_cx/'
elif n == 20 and li >= 10:
symm = True
base_dir = my_directory+'ising/ALAy_symm/'
for hxi in hx:
E = float(np.genfromtxt(base_dir+'n'+str(n)+'_l'+str(li)+'_hx'+str(hxi)+'_hz'+str(hz)+'/E.csv'))
theta = np.genfromtxt(base_dir+'n'+str(n)+'_l'+str(li)+'_hx'+str(hxi)+'_hz'+str(hz)+'/theta.csv',delimiter=',')
if not symm:
submit_ising(n,theta,backend_name,shots=1024,hx=hxi,hz=hz,E=E,rand_compile=rand_compile,noise_scale=noise_scale)
elif symm:
submit_ising_symm(n,theta,backend_name,shots=8192,hx=hxi,hz=hz,E=E,input_condensed_theta=False,rand_compile=rand_compile,noise_scale=noise_scale)
def submit_zero_calibration(n,l,backend_name,rand_compile=True,noise_scale=1):
from energy_evaluation import all_ising_Paulis_symm, submit_circuits
if not hasattr(l,'__iter__'):
l = [l]
whichPauli = all_ising_Paulis_symm(n)
for li in l:
theta = np.zeros(n*(li+1))
submit_circuits(theta,whichPauli,backend_name,tags=['zero_theta_calibration'],shots=8192,rand_compile=rand_compile,noise_scale=noise_scale)
## It is important that the backend is not recalibrated between the runs of any of the above jobs. To check the latest calibration datetime, use
def latest_calibration_date(backend_name,n):
from qiskit import IBMQ
from energy_evaluation import load_qubit_map
account = IBMQ.load_account()
backend = account.get_backend(backend_name)
properties = backend.properties()
gates = properties.gates
qubits = properties.qubits
loop_qubits = load_qubit_map(backend_name,n)
sx_dates = [gate.parameters[0].date for gate in gates if gate.gate == 'sx' and gate.qubits[0] in loop_qubits]
cx_dates = [gate.parameters[0].date for gate in gates if gate.gate == 'cx' and gate.qubits[0] in loop_qubits and gate.qubits[1] in loop_qubits]
em_dates = [ qubits[q][4].date for q in loop_qubits]
return max( max(cx_dates), max(sx_dates), max(em_dates) )
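## For example, latest_calibration_date('ibmq_toronto', 20) returns the most recent calibration
## datetime among the qubits in the 20-qubit loop; compare it to the creation times of your jobs.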
## you might also want to check the latest calibration date at a time when a job ran. To do this, use:
def latest_calibration_date_from_job(job_id):
    from qiskit import IBMQ
    from energy_evaluation import load_qubit_map, read_from_tags
    account = IBMQ.load_account()
    job = account.backends.retrieve_job(job_id)
n = read_from_tags('n',job.tags())
properties = job.properties()
backend_name = job.backend().name()
gates = properties.gates
qubits = properties.qubits
loop_qubits = load_qubit_map(backend_name,n,0)
sx_dates = [gate.parameters[0].date for gate in gates if gate.gate == 'sx' and gate.qubits[0] in loop_qubits]
cx_dates = [gate.parameters[0].date for gate in gates if gate.gate == 'cx' and gate.qubits[0] in loop_qubits and gate.qubits[1] in loop_qubits]
em_dates = [ qubits[q][4].date for q in loop_qubits]
return max( max(cx_dates), max(sx_dates), max(em_dates) )
############ Step 3 ###############
## Now that your circuits have run successfully, it is time to analyze the results.
## Use the following functions to compare the observed damping factors to the predicted damping factors.
# The observed damping factor for a given job, with or without readout error mitigation applied, is
def damping_from_job(job,readout_mitigate=True,readout_calibration_job=[]):
from energy_evaluation import ising_energy_from_job
E_exact = read_from_tags('E',job.tags())
E_meas, dE_meas = ising_energy_from_job(job,readout_mitigate,readout_calibration_job)
print('E_meas = '+str(E_meas))
print('dE_meas = '+str(dE_meas))
damping = E_meas/E_exact
d_damping = abs(dE_meas/E_exact)
return damping, d_damping
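# For example, to compute the observed damping for the most recent matching ansatz job
# (a sketch; it assumes the jobs submitted by submit_saved_params above, and the function
# name is illustrative):
def example_observed_damping(backend, n=12, l=5, hx=1.5, hz=0.1):
    tags = ['n = '+str(n), 'l = '+str(l), 'hx = '+str(hx), 'hz = '+str(hz)]
    job = backend.jobs(limit=1, job_tags=tags, job_tags_operator='AND')[0]
    return damping_from_job(job, readout_mitigate=True)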
## We use several methods of predicting the damping factor:
def plot_figs(backend,n=20,hx=1.5,hz=0.1,l_all=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,25,30,35,40,45,50],readout_mitigate=True,plot_ZNE=False,load_from_saved=False,threshold=0.1,plot_ZNE_calib=False,plot_from_pert=False):
import matplotlib.pyplot as plt
from matplotlib import container
from matplotlib import colors
import pickle
## first, retrieve the data:
if readout_mitigate:
filename = backend.name()+'_n'+str(n)+".p"
else:
filename = backend.name()+'_n'+str(n)+"_no_readout_mitigation.p"
if load_from_saved:
damping, d_damping, rel_error, d_rel_error = pickle.load( open( filename, "rb" ) )
else:
damping = {}
d_damping = {}
rel_error = {}
d_rel_error = {}
methods = ['raw','from pert','from small $l$',r'$\theta = 0$']
for method in methods:
damping[method] = np.empty(len(l_all))
damping[method][:] = np.nan
d_damping[method] = np.empty(len(l_all))
d_damping[method][:] = np.nan
methods_ZNE = ['ZNE',r'$\theta = 0$ + ZNE first',r'$\theta = 0$ + ZNE last']
rel_error = {}
d_rel_error = {}
for method in methods + methods_ZNE:
rel_error[method] = np.empty(len(l_all))
rel_error[method][:] = np.nan
d_rel_error[method] = np.empty(len(l_all))
d_rel_error[method][:] = np.nan
fit_shallow = small_l_fit(backend,n,hx,hz,max_l=15,readout_mitigate=readout_mitigate)
for _ in range(len(l_all)):
l = l_all[_]
print('starting l = '+str(l))
if l > 0:
limit = 3
else:
limit = 1
jobs = backend.jobs(limit=limit,job_tags=['n = '+str(n),'l = '+str(l),'hx = '+str(hx),'hz = '+str(hz)],job_tags_operator='AND')
jobs_calib = backend.jobs(limit=limit,job_tags=['zero_theta_calibration','n = '+str(n),'l = '+str(l)],job_tags_operator='AND')
for job in jobs:
if read_from_tags('noise_scale',job.tags()) == 1.0:
break
for job_calib in jobs_calib:
if read_from_tags('noise_scale',job_calib.tags()) == 1.0:
break
damping['raw'][_], d_damping['raw'][_] = damping_from_job(job,readout_mitigate)
damping['from pert'][_], d_damping['from pert'][_] = damping_est_pert(job,readout_mitigate,plot=plot_from_pert,damping1_5=damping['raw'][_],d_damping1_5=d_damping['raw'][_])
            damping['from small $l$'][_], d_damping['from small $l$'][_] = pred_from_fit(l,fit_shallow)
damping[r'$\theta = 0$'][_], d_damping[r'$\theta = 0$'][_] = damping_from_zero_theta_energy(job_calib,hx,hz,readout_mitigate)
if l > 0:
rel_error['ZNE'][_], d_rel_error['ZNE'][_] = ZNE(jobs,readout_mitigate=readout_mitigate,plot=plot_ZNE)
rel_error[r'$\theta = 0$ + ZNE last'][_], d_rel_error[r'$\theta = 0$ + ZNE last'][_] = damping_zero_theta_ZNE(jobs,jobs_calib,order='extrapolate_last',readout_mitigate=readout_mitigate,plot=plot_ZNE_calib)
rel_error[r'$\theta = 0$ + ZNE first'][_], d_rel_error[r'$\theta = 0$ + ZNE first'][_] = damping_zero_theta_ZNE(jobs,jobs_calib,order='extrapolate_first',readout_mitigate=readout_mitigate,plot=plot_ZNE_calib)
for method in rel_error:
rel_error[method][_] -= 1
for method in damping:
if (method != 'raw' and method != 'ZNE'):
rel_error[method] = damping['raw']/damping[method] - 1
                d_rel_error[method] = np.sqrt( (d_damping['raw']/damping[method])**2 + (damping['raw']*d_damping[method]/damping[method]**2)**2 )
elif method == 'raw':
rel_error[method] = damping[method] - 1
d_rel_error[method] = d_damping[method]
pickle.dump( (damping, d_damping, rel_error, d_rel_error), open( filename, "wb" ) )
## now plot
markers = ['o','v','^','<','>','s','P','*','+','x','D']
marker_i = 0
### damping:
fig, ax = plt.subplots()
for method in damping:
if method == 'raw':
plt.errorbar(l_all,damping[method],d_damping[method],label='true damping factor',linewidth=3,capsize=4,fmt=markers[marker_i])
else:
plt.errorbar(l_all,damping[method],d_damping[method],label='predicted, '+method,linewidth=3,capsize=4,fmt=markers[marker_i]+'-')
marker_i += 1
plt.xlabel('number of ansatz layers',fontsize = 20)
plt.ylabel('actual or predicted damping factor',fontsize = 18)
# removing error bars from legend using https://swdg.io/2015/errorbar-legends/
handles, labels = ax.get_legend_handles_labels()
new_handles = []
for h in handles:
#only need to edit the errorbar legend entries
if isinstance(h, container.ErrorbarContainer):
new_handles.append(h[0])
else:
new_handles.append(h)
ax.legend(new_handles, labels,loc='best',prop={'size': 11})
plt.ylim((1e-2,1))
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
plt.title(backend.name(),fontsize=20)
plt.yscale('log')
fig.tight_layout()
### relative error:
marker_i = 0
fig, ax = plt.subplots()
for method in rel_error:
plt.errorbar(l_all,rel_error[method],d_rel_error[method],label=method,linewidth=3,capsize=4,fmt=markers[marker_i]+'-')
marker_i += 1
plt.xlabel('number of ansatz layers',fontsize = 20)
plt.ylabel('relative error',fontsize = 18)
# removing error bars from legend using https://swdg.io/2015/errorbar-legends/
handles, labels = ax.get_legend_handles_labels()
new_handles = []
for h in handles:
#only need to edit the errorbar legend entries
if isinstance(h, container.ErrorbarContainer):
new_handles.append(h[0])
else:
new_handles.append(h)
ax.legend(new_handles, labels,loc='best',prop={'size': 11})
#plt.legend(loc='best',prop={'size': 11})
plt.plot([min(l_all),max(l_all)],[threshold,threshold],'k--',linewidth=2)
plt.plot([min(l_all),max(l_all)],[-threshold,-threshold],'k--',linewidth=2)
plt.ylim((-1,3))
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
plt.title(backend.name(),fontsize=20)
fig.tight_layout()
cmap = colors.ListedColormap(np.array([[255,255,204],[161,218,180],[65,182,196],[34,94,168]])/255)
scores = {}
for method in rel_error:
scores[method] = rel_error_score(rel_error[method],d_rel_error[method],threshold)
fig, ax = plt.subplots()
im = ax.imshow(list(scores.values()),cmap=cmap)
# Loop over data dimensions and create text annotations.
for i in range(len(scores)):
for j in range(len(l_all)):
if not np.isnan(list(scores.values())[i][j]):
text = ax.text(j, i, list(scores.values())[i][j], ha="center", va="center", color="k")
# We want to show all ticks...
ax.set_xticks(np.arange(len(l_all)))
ax.set_yticks(np.arange(len(scores)))
# ... and label them with the respective list entries
ax.set_xticklabels(l_all)
ax.set_yticklabels(list(scores.keys()))
plt.xlabel('number of ansatz layers',fontsize = 18)
#plt.ylabel('mitigation method',fontsize = 15)
plt.title(str(n)+' qubits, '+backend.name(),fontsize=15)
ax.tick_params(axis='y', which='major', labelsize=12)
ax.tick_params(axis='y', which='minor', labelsize=12)
ax.tick_params(axis='x', which='major', labelsize=11)
ax.tick_params(axis='x', which='minor', labelsize=11)
fig.tight_layout()
plt.show()
# From the perturbative regime:
def damping_est_pert(job,readout_mitigate=True,calibration_job=[],noise_scale=1,plot=False,damping1_5=0,d_damping1_5=0):
backend = job.backend()
tags = job.tags()
n = read_from_tags('n',tags)
hz = read_from_tags('hz',tags)
symm = 'symm' in tags
l = read_from_tags('l',tags)
hx_pert = [0.1,0.2,0.3,0.4,0.5]
damping_all = []
d_damping_all = []
for hx in hx_pert:
desired_tags = ['Ising','l = '+str(l),'hx = '+str(hx),'n = '+str(n),'hz = '+str(hz),'noise_scale = '+str(noise_scale)]
if symm:
desired_tags.append('symm')
job_pert = backend.jobs(limit=1,job_tags=desired_tags,job_tags_operator='AND')[0]
damping_i, d_damping_i = damping_from_job(job_pert,readout_mitigate,calibration_job)
damping_all.append(damping_i)
d_damping_all.append(d_damping_i)
damping = np.mean(damping_all)
d_damping = np.std(damping_all)/np.sqrt(len(hx_pert))
if plot:
import matplotlib.pyplot as plt
hx = hx_pert + [1.5]
damping_all.append(damping1_5)
d_damping_all.append(d_damping1_5)
plt.errorbar(hx,damping_all,d_damping_all,fmt='.',capsize=4,label='observed damping factors')
plt.plot([min(hx),max(hx)],[damping,damping],'k')
plt.plot([min(hx),max(hx)],[damping+d_damping,damping+d_damping],'k--')
plt.plot([min(hx),max(hx)],[damping-d_damping,damping-d_damping],'k--')
plt.legend(loc='best',prop={'size':15})
plt.xlabel('$h_x$', fontsize=20)
plt.ylabel('damping factor',fontsize=20)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
if l != 1:
plt.title(backend.name()+', '+str(l)+' ansatz layers',fontsize=20)
else:
plt.title(backend.name()+', '+str(l)+' ansatz layer',fontsize=20)
plt.tight_layout()
plt.show()
return damping, d_damping
# ZNE:
def ZNE(jobs,readout_mitigate=True,plot=True):
import matplotlib.pyplot as plt
from matplotlib import container
scales = [read_from_tags('noise_scale',j.tags()) for j in jobs]
dampings = []
d_dampings = []
for job in jobs:
damping, d_damping = damping_from_job(job,readout_mitigate=readout_mitigate)
dampings.append(damping)
d_dampings.append(d_damping)
from scipy.optimize import curve_fit
try:
fit = curve_fit(exp_fit,scales,dampings,p0=[1,0.5],sigma=d_dampings,absolute_sigma=True)
failed = False
except:
print('error: fit failed')
failed = True
if plot:
fig, ax = plt.subplots()
print('scales = '+str(scales))
print('dampings = '+str(dampings))
plt.errorbar(scales,dampings,d_dampings,label='measured energy/exact energy',linewidth=3,capsize=4)
if not failed:
plt.plot(np.linspace(0,max(scales),100),exp_fit(np.linspace(0,max(scales),100),fit[0][0], fit[0][1]),label='exponential fit')
plt.xlabel('noise scale',fontsize = 18)
plt.ylabel('energy/exact energy',fontsize = 18)
plt.xlim([0,max(scales)])
        dampings = np.array(dampings)
        d_dampings = np.array(d_dampings)
        ymax = max(max(dampings + d_dampings), 1)
        if not failed:
            ymax = max(ymax, exp_fit(0, fit[0][0], fit[0][1]))
        plt.ylim([min(dampings - d_dampings), ymax])
plt.yscale('log')
ax.tick_params(axis='both', which='major', labelsize=15)
ax.tick_params(axis='both', which='minor', labelsize=15)
# removing error bars from legend using https://swdg.io/2015/errorbar-legends/
handles, labels = ax.get_legend_handles_labels()
new_handles = []
for h in handles:
#only need to edit the errorbar legend entries
if isinstance(h, container.ErrorbarContainer):
new_handles.append(h[0])
else:
new_handles.append(h)
ax.legend(new_handles, labels,loc='best',prop={'size': 11})
fig.tight_layout()
plt.show()
if failed:
return float('nan'), float('nan')
else:
return pred_from_fit(0,fit)
def ZNE_zero_theta(jobs,hx,hz,readout_mitigate=True,plot=True):
import matplotlib.pyplot as plt
scales = [read_from_tags('noise_scale',j.tags()) for j in jobs]
dampings = []
d_dampings = []
for job in jobs:
damping, d_damping = damping_from_zero_theta_energy(job,hx,hz,readout_mitigate=readout_mitigate)
dampings.append(damping)
d_dampings.append(d_damping)
from scipy.optimize import curve_fit
try:
fit = curve_fit(exp_fit,scales,dampings,p0=[1,0.5],sigma=d_dampings,absolute_sigma=True)
except:
print('error: fit failed')
return float('nan'), float('nan')
if plot:
plt.errorbar(scales,dampings,d_dampings,label='data')
plt.plot(np.linspace(0,max(scales),100),exp_fit(np.linspace(0,max(scales),100),fit[0][0], fit[0][1]),label='fit')
plt.legend(loc='best')
plt.xlabel('noise scale')
plt.ylabel('damping factor')
plt.xlim([0,max(scales)])
plt.show()
return pred_from_fit(0,fit)
def damping_zero_theta_ZNE(jobs_ZNE,jobs_ZNE_zero_theta,order='extrapolate_last',readout_mitigate=True,plot=True):
hx = read_from_tags('hx',jobs_ZNE[0].tags())
hz = read_from_tags('hz',jobs_ZNE[0].tags())
if order == 'extrapolate_first':
damping, d_damping = ZNE(jobs_ZNE,readout_mitigate=readout_mitigate,plot=plot)
damping_zero_theta, d_damping_zero_theta = ZNE_zero_theta(jobs_ZNE_zero_theta,hx,hz,readout_mitigate=readout_mitigate,plot=plot)
return damping/damping_zero_theta, np.sqrt( (d_damping/damping_zero_theta)**2 + (damping*d_damping_zero_theta/damping_zero_theta**2)**2)
elif order == 'extrapolate_last':
dampings = []
d_dampings = []
scales = []
for i in range(len(jobs_ZNE)):
job = jobs_ZNE[i]
job_calib = jobs_ZNE_zero_theta[i]
scales.append(read_from_tags('noise_scale',job.tags()))
damping, d_damping = damping_from_job(job,readout_mitigate=readout_mitigate)
damping_calib, d_damping_calib = damping_from_zero_theta_energy(job_calib,hx,hz,readout_mitigate=readout_mitigate)
dampings.append(damping/damping_calib)
d_dampings.append(np.sqrt( (d_damping/damping_calib)**2 + (damping*d_damping_calib/damping_calib**2)**2))
print('scale = '+str(scales[-1]))
print('damping_i = '+str(dampings[-1]))
print('d_damping_i = '+str(d_dampings[-1]))
from scipy.optimize import curve_fit
try:
fit = curve_fit(exp_fit,scales,dampings,p0=[1,0.5],sigma=d_dampings,absolute_sigma=True)
except:
print('error: fit failed')
return float('nan'), float('nan')
if plot:
import matplotlib.pyplot as plt
plt.errorbar(scales,dampings,d_dampings,label='data')
plt.plot(np.linspace(0,max(scales),100),exp_fit(np.linspace(0,max(scales),100),fit[0][0], fit[0][1]),label='fit')
plt.legend(loc='best')
plt.xlabel('noise scale')
plt.ylabel('damping factor')
plt.xlim([0,max(scales)])
plt.show()
return pred_from_fit(0,fit)
# from small l:
def exp_fit(l,A,b):
return A*np.exp(-b*l)
def small_l_fit(backend,n,hx,hz,max_l=15,readout_mitigate=True,noise_scale=1):
from scipy.optimize import curve_fit
l_all = range(0,max_l+1)
damping_all = []
d_damping_all = []
for l in l_all:
desired_tags = ['Ising','l = '+str(l),'hx = '+str(hx),'n = '+str(n),'hz = '+str(hz)]
jobs = backend.jobs(limit=3,job_tags=desired_tags,job_tags_operator='AND')
for job in jobs:
if read_from_tags('noise_scale',job.tags()) == noise_scale:
break
damping_i, d_damping_i = damping_from_job(job,readout_mitigate)
damping_all.append(damping_i)
d_damping_all.append(d_damping_i)
fit_shallow = curve_fit(exp_fit,l_all,damping_all,p0=[1,0.5],sigma=d_damping_all,absolute_sigma=True)
return fit_shallow
def pred_from_fit(l,fit,size=100000):
rng = np.random.default_rng()
params = rng.multivariate_normal(fit[0],fit[1],size=size)
est = exp_fit(l,params[:,0],params[:,1])
return np.mean(est), np.std(est)
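# Quick self-test of the exponential fit and extrapolation helpers on synthetic data
# (illustrative only; the parameter values below are made up, not fitted device data):
def example_fit_extrapolation():
    from scipy.optimize import curve_fit
    rng = np.random.default_rng(0)
    l = np.arange(0, 16)
    d_damping = 0.01 * np.ones(len(l))
    damping_obs = exp_fit(l, 0.95, 0.08) + rng.normal(scale=d_damping)
    fit = curve_fit(exp_fit, l, damping_obs, p0=[1, 0.5], sigma=d_damping, absolute_sigma=True)
    # extrapolate to l = 50 and propagate the fit uncertainty via pred_from_fit:
    return pred_from_fit(50, fit)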
# from zero theta calibration:
def Minv_uncorrelated_uncertainty(e0_0_pop, e1_0_pop, e0_1_pop, e1_1_pop, shots):
num_trials = 100000
rng = np.random.default_rng()
Minv = []
for trial in range(num_trials):
e0_0 = rng.binomial(shots,e0_0_pop)/shots
e1_0 = rng.binomial(shots,e1_0_pop)/shots
e0_1 = rng.binomial(shots,e0_1_pop)/shots
e1_1 = rng.binomial(shots,e1_1_pop)/shots
M = [[ (1 - e0_0)*(1-e0_1), e1_0*(1-e0_1), e1_1*(1-e0_0), e1_0*e1_1], \
[e0_0*(1-e0_1), (1-e1_0)*(1-e0_1), e0_0*e1_1, e1_1*(1-e1_0)], \
[e0_1*(1-e0_0), e1_0*e0_1, (1-e0_0)*(1-e1_1), (1-e1_1)*e1_0], \
[e0_1*e0_0, (1-e1_0)*e0_1, e0_0*(1-e1_1), (1-e1_1)*(1-e1_0)]]
Minv.append( np.linalg.inv(M))
return np.mean(Minv,axis=0), np.std(Minv,axis=0)
def damping_from_zero_theta_energy(zero_calib_job,hx,hz,readout_mitigate=True,readout_calibrate_job=[]):
from energy_evaluation import ising_energy_from_job, energy_from_job
E_exact = -2*(1+hz)
coeffs = [-1 for _ in range(2)] + [-hx for _ in range(2)] + [-hz for _ in range(2)]
E_meas, dE_meas = energy_from_job(zero_calib_job,coeffs,readout_mitigate,readout_calibrate_job)
damping = E_meas/E_exact
d_damping = abs(dE_meas/E_exact)
return damping, d_damping
# finally, we have the two methods which estimate the damping from the reported error rates
# simulating with qiskit aer noise model (not scalable):
def noise_model_from_properties(properties,include_gate_errors=True,include_readout_errors=True):
from qiskit.providers.aer.noise import device, NoiseModel
gates = properties.gates
basis_gates = list({g.gate for g in gates})
noise_model = NoiseModel(basis_gates=basis_gates)
if include_gate_errors:
gate_errors = device.basic_device_gate_errors(properties)
for gate_error in gate_errors:
noise_model.add_quantum_error(gate_error[2],gate_error[0],gate_error[1])
if include_readout_errors:
readout_errors = device.basic_device_readout_errors(properties)
for readout_error in readout_errors:
noise_model.add_readout_error(readout_error[1], readout_error[0])
return noise_model
def simulate_job(job,include_noise=True,gpu=True,include_gate_errors=True,include_readout_errors=True,density_matrix=True):
# the gpu option requires qiskit-aer-gpu
from qiskit import QuantumCircuit, execute, Aer, IBMQ
import qiskit.providers.aer.noise as noise
from qiskit.providers.aer import QasmSimulator
from energy_evaluation import ansatz_circuit, load_qubit_map, read_from_tags, cycle_QuantumCircuit, energy_from_counts
backend = job.backend()
machine = backend.name()
tags = job.tags()
n = read_from_tags('n',tags)
hx = read_from_tags('hx',tags)
hz = read_from_tags('hz',tags)
E = read_from_tags('E',tags)
l = read_from_tags('l',tags)
paulis = read_from_tags('whichPauli',tags)
configs = read_from_tags('configs',tags)
theta = read_from_tags('theta',tags)
symm = 'symm' in tags
if include_noise:
#noise_model = noise.NoiseModel.from_backend(backend)
noise_model = noise_model_from_properties(job.properties(),include_gate_errors,include_readout_errors)
# Get coupling map from backend
coupling_map = backend.configuration().coupling_map
# Get basis gates from noise model
basis_gates = noise_model.basis_gates
if gpu and density_matrix:
simulator = QasmSimulator(method='density_matrix_gpu')
elif not gpu and density_matrix:
simulator = QasmSimulator(method='density_matrix',max_parallel_threads=30)
elif gpu and not density_matrix:
simulator = QasmSimulator(method='statevector_gpu')
elif not gpu and not density_matrix:
simulator = QasmSimulator(method='statevector')
qubits0 = load_qubit_map(machine,n)
qc = []
multi_theta = len( np.shape(theta) ) > 1
if not multi_theta:
theta = [theta]
for theta_i in theta:
for i in range(len(paulis)):
pauli = paulis[i]
for config in configs[i]:
qc_i = ansatz_circuit(theta_i,pauli,rand_compile=False,noise_scale=1)
qc_i = cycle_QuantumCircuit(qc_i,config)
qc.append(qc_i)
if include_noise:
job2 = execute(qc, simulator, basis_gates=basis_gates, noise_model=noise_model,coupling_map=coupling_map,initial_layout=qubits0)
else:
job2 = execute(qc, Aer.get_backend('qasm_simulator'))
counts = job2.result().get_counts()
if symm:
coeffs = [-1 for _ in range(2)] + [-hx for _ in range(2)] + [-hz for _ in range(2)]
        coeffs = np.array(coeffs) * (n//2) # n//2 unit cells when using the translation-symmetric ansatz
else:
coeffs = [-1 for _ in range(n)] + [-hx for _ in range(n)] + [-hz for _ in range(n)]
E, dE = energy_from_counts(counts,coeffs)
return E, dE
def damping_from_aer_simulation(job,include_noise=True,gpu=True,include_gate_errors=True,include_readout_errors=True,density_matrix=True):
# readout error is included in the aer simulation, so this should be compared to the measured dampings without readout error mitigation
E_exact = read_from_tags('E',job.tags())
E_pred, dE_pred = simulate_job(job,include_noise,gpu,include_gate_errors,include_readout_errors,density_matrix)
damping = E_pred/E_exact
d_damping = dE_pred/E_exact
return damping, d_damping
# multiplying fidelities:
def energy_from_job_mult_fidelities(job,coeffs):
from library import damping_from_fidelities
counts = job.result().get_counts()
tags = job.tags()
whichPauli_all = read_from_tags('whichPauli',tags)
n = read_from_tags('n',tags)
l = read_from_tags('l',tags)
configs = read_from_tags('configs',tags)
num_configs = len(configs[0])
num_thetas = len(counts)//(num_configs*len(whichPauli_all))
num_terms = len(whichPauli_all)
multi_coeffs = len(np.shape(coeffs)) == 2
if multi_coeffs:
coeffs_all = coeffs
backend_name = job.backend().name()
qubits = load_qubit_map(backend_name,n)
properties = job.properties()
e0 = np.array([properties.qubits[q][6].value for q in qubits])
e1 = np.array([properties.qubits[q][5].value for q in qubits])
em = (e0+e1)/2
e1_minus_e0 = e1 - e0
e_cx = [properties.gate_error('cx',[qubits[i],qubits[(i+1)%n]]) for i in range(n)]
e_sx = [properties.gate_error('sx',q) for q in qubits]
E_all = []
dE_all = []
for which_theta in range(num_thetas):
E = 0
dE2 = 0
if multi_coeffs:
coeffs = coeffs_all[which_theta]
for term in range(num_terms):
whichPauli = whichPauli_all[term]
qubits_measured = np.array([i for i in range(n) if whichPauli[i]>0])
for which_config in range(num_configs):
config = configs[term][which_config]
if config >= 0:
qubits_measured_config = np.mod(qubits_measured + config, n)
elif config < 0:
qubits_measured_config = np.mod( -qubits_measured + config + 1, n)
P,dP = P_from_counts(counts[which_theta*num_configs*num_terms + num_configs*term + which_config])
P,dP = readout_error_correct(P,dP,em[qubits_measured_config],e1_minus_e0[qubits_measured_config])
predicted_damping = damping_from_fidelities(l,whichPauli, e_cx, e_sx,config)
P = P/predicted_damping
dP = dP/predicted_damping
E += coeffs[term] * P /num_configs
dE2 += (coeffs[term] * dP /num_configs )**2
E_all.append(E)
dE_all.append(np.sqrt(dE2))
if num_thetas > 1:
return E_all, dE_all
elif num_thetas == 1:
return E_all[0], dE_all[0]
def ising_energy_from_job_mult_fidelities(job):
tags = job.tags()
symm = 'symm' in tags
hx = read_from_tags('hx',tags)
hz = read_from_tags('hz',tags)
n = read_from_tags('n',tags)
if symm: # symmetric ansatz
m = 2
else:
m = n
multi_hx = hasattr(hx,'__iter__')
if not multi_hx:
coeffs = [-1 for _ in range(m)] + [-hx for _ in range(m)] + [-hz for _ in range(m)]
elif multi_hx:
coeffs = [[-1 for _ in range(m)] + [-hxi for _ in range(m)] + [-hz for _ in range(m)] for hxi in hx]
if symm:
        coeffs = np.array(coeffs) * (n//2) # rescale the coefficients by the number of unit cells
return energy_from_job_mult_fidelities(job,coeffs)
def damping_mult_fidelities(job):
# this is the damping factor including readout errors
from energy_evaluation import ising_energy_from_job
E_meas, dE_meas = ising_energy_from_job(job)
E_mitigated, dE_mitigated = ising_energy_from_job_mult_fidelities(job)
damping = E_meas/E_mitigated
return damping
################# The following is not finished: ##########################
## more careful readout mitigation:
def qubits_measured_from_job(job):
tags = job.tags()
whichPauli_all = read_from_tags('whichPauli',tags)
configs_all = read_from_tags('configs',tags)
n = read_from_tags('n',tags)
qubits_measured_all = set()
num_terms = len(whichPauli_all)
for term in range(num_terms):
whichPauli = whichPauli_all[term]
configs = configs_all[term]
qubits_measured_0 = np.array([i for i in range(n) if whichPauli[i] > 0])
for config in configs:
if config >= 0:
qubits_measured = (qubits_measured_0 + config) % n
else:
qubits_measured = (-qubits_measured_0 + config + 1) % n
qubits_measured_all.add( frozenset(qubits_measured) )
return qubits_measured_all
def submit_readout_calibration_circuits(n,backend,qubits_measured_all,shots=8192):
from qiskit import QuantumCircuit, execute
from energy_evaluation import load_qubit_map
qc_all = []
qubits = load_qubit_map(backend.name(),n)
for qubits_measured in qubits_measured_all:
qubits_measured = list(qubits_measured)
if len(qubits_measured) == 2:
for x0 in [False, True]:
for x1 in [False, True]:
qc = QuantumCircuit(n,2)
if x0:
qc.x(qubits_measured[0])
if x1:
qc.x(qubits_measured[1])
qc.measure(qubits_measured[0],0)
qc.measure(qubits_measured[1],1)
qc_all.append(qc)
elif len(qubits_measured) == 1:
for x0 in [False,True]:
qc = QuantumCircuit(n,1)
if x0:
qc.x(qubits_measured[0])
qc.measure(qubits_measured[0],0)
qc_all.append(qc)
job = execute(qc_all, backend=backend, shots=shots, initial_layout=qubits, job_tags=['readout_calibration','qubits_measured_all = '+str(qubits_measured_all)])
return job
def submit_readout_calibration_datetimes(n,backend_name,start,end,shots=8192):
from qiskit import IBMQ
account = IBMQ.load_account()
backend = account.get_backend(backend_name)
jobs = backend.jobs(limit=1000,start_datetime=start,end_datetime=end,job_tags=['n = '+str(n)])
qubits_measured_all = set()
print('# jobs = '+str(len(jobs)))
for job in jobs:
qubits_measured_all = qubits_measured_all.union( qubits_measured_from_job(job) )
print('qubits_measured_all = '+str(qubits_measured_all))
return submit_readout_calibration_circuits(n,backend,qubits_measured_all,shots)
def analyze_readout_calibration(calibration_job):
result = calibration_job.result()
shots = result.results[0].shots
counts = result.get_counts()
num_pairs = len(counts)//4
e0 = []
e1 = []
for pair in range(num_pairs):
e0_pair = (counts[4*pair].get('01',0) + counts[4*pair].get('10',0) + counts[4*pair+3].get('01',0) + counts[4*pair+3].get('10',0))/(2*shots)
e1_pair = (counts[4*pair+1].get('00',0) + counts[4*pair+1].get('11',0) + counts[4*pair+2].get('00',0) + counts[4*pair+2].get('11',0))/(2*shots)
e0.append(e0_pair)
e1.append(e1_pair)
e0 = np.array(e0)
e1 = np.array(e1)
return e0, e1
def analyze_readout_calibration_advanced(calibration_job):
result = calibration_job.result()
shots = result.results[0].shots
counts = result.get_counts()
qubits_measured_all = list(read_from_tags('qubits_measured_all',calibration_job.tags()))
circuit = 0
e_1qubit = []
Minv = []
dMinv = []
for qubits_measured in qubits_measured_all:
num_qubits = len(list(counts[circuit])[0])
if num_qubits == 1:
e_1qubit.append( [counts[circuit].get('1',0)/shots, counts[circuit+1].get('0',0)/shots] )
circuit += 2
elif num_qubits == 2:
M = [ [ counts[circuit+j].get(bitstr,0)/shots for j in range(4)] for bitstr in ['00','10','01','11'] ]
M = np.array(M)
#dM = np.sqrt(M*(1-M)/shots)
Minv_i_est, dMinv_i = uncertainty_in_Minv(M,shots)
Minv_i = np.linalg.inv(M)
Minv.append( Minv_i )
dMinv.append(dMinv_i)
circuit += 4
e_1qubit = np.array(e_1qubit)
de_1qubit = np.sqrt(e_1qubit * (1-e_1qubit) /shots)
return e_1qubit, Minv, de_1qubit, dMinv
def uncertainty_in_Minv(M,shots):
rng = np.random.default_rng()
trials = 10000
Minv = []
for trial in range(trials):
M_trial = (np.array([ rng.multinomial(shots,column) for column in M.T ]).T)/shots
Minv.append( np.linalg.inv(M_trial) )
return np.mean(Minv,axis=0), np.std(Minv,axis=0)
# plotting:
def rel_error(damping,d_damping,damping_est,d_damping_est):
damping = np.array(damping)
damping_est = np.array(damping_est)
d_damping = np.array(d_damping)
d_damping_est = np.array(d_damping_est)
rel_error = (damping - damping_est)/damping_est
d_rel_error = np.sqrt( (d_damping/damping_est)**2 + (d_damping_est*damping/damping_est**2)**2 )
return [rel_error,d_rel_error]
def rel_error_score(r,dr,th):
# assigns 3, 2, 1, or 0
score = []
for i in range(len(r)):
if -th < r[i] - dr[i] and r[i] + dr[i] < th:
score.append(3)
elif abs(r[i] + dr[i]) < th or abs(r[i] - dr[i]) < th or (r[i] + dr[i] > th and r[i] - dr[i] < - th):
score.append(2)
elif abs(r[i]) - th < 2*dr[i]:
score.append(1)
elif np.isnan(r[i]):
score.append(np.nan)
else:
score.append(0)
return score
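# For example, with threshold th = 0.1:
# rel_error_score([0.02, 0.08, 0.3], [0.01, 0.05, 0.05], 0.1) returns [3, 2, 0]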
def plot_from_machine_layers(n=20,hx=1.5,hz=0.1,start_date = datetime(year=2021,month=4,day=29,hour=0,minute=2,second=30),end_date = datetime(year=2021,month=4,day=30,hour=23,minute=0),backend_name='ibmq_toronto',readout_calibrate = True,threshold=0.1,load_saved=False):
# n=20,hx=1.5,hz=0.1,start_date = datetime(year=2021,month=4,day=13,hour=0,minute=2,second=30),end_date = datetime(year=2021,month=4,day=13,hour=23,minute=0),backend_name='ibmq_toronto'
import matplotlib.pyplot as plt
save_dir = '/your_directory/results/damping_factors/'+backend_name+'/n'+str(n)+'/'
damping_readout = []
d_damping_readout = []
damping_readout_calibrate = []
d_damping_readout_calibrate = []
damping_raw = []
d_damping_raw = []
damping_pert_readout = []
d_damping_pert_readout = []
damping_pert_raw = []
d_damping_pert_raw = []
damping_pert_readout_calibrate = []
d_damping_pert_readout_calibrate = []
damping_zero_fid_readout = []
d_damping_zero_fid_readout = []
damping_zero_fid_raw = []
d_damping_zero_fid_raw = []
damping_zero_fid_readout_calibrate = []
d_damping_zero_fid_readout_calibrate = []
damping_zero_energy_readout = []
d_damping_zero_energy_readout = []
damping_zero_energy_raw = []
d_damping_zero_energy_raw = []
damping_zero_energy_readout_calibrate = []
d_damping_zero_energy_readout_calibrate = []
l = []
if not load_saved:
from qiskit import IBMQ
account = IBMQ.load_account()
backend = account.get_backend(backend_name)
jobs = backend.jobs(limit=1000,start_datetime=start_date,end_datetime=end_date,job_tags=['hx = '+str(hx),'n = '+str(n),'hz = '+str(hz)],job_tags_operator='AND',status='DONE') + backend.jobs(limit=1000,start_datetime=start_date,end_datetime=end_date,job_tags=['hx = '+str([hx]),'n = '+str(n),'hz = '+str(hz)],job_tags_operator='AND',status='DONE')
if readout_calibrate:
readout_calibrate_job = backend.jobs(limit=1,start_datetime=start_date,end_datetime=end_date,job_tags=['readout_calibration'],status='DONE')[0]
else:
readout_calibrate_job = []
for job in jobs:
l.append(read_from_tags('l',job.tags()))
print('l = '+str(l[-1]))
if readout_calibrate:
damping_readout_calibrate_i, d_damping_readout_calibrate_i = damping_from_job(job, True, readout_calibrate_job)
damping_readout_calibrate.append(damping_readout_calibrate_i)
d_damping_readout_calibrate.append(d_damping_readout_calibrate_i)
damping_readout_i, d_damping_readout_i = damping_from_job(job, True)
damping_readout.append(damping_readout_i)
d_damping_readout.append(d_damping_readout_i)
damping_raw_i, d_damping_raw_i = damping_from_job(job, False)
damping_raw.append(damping_raw_i)
d_damping_raw.append(d_damping_raw_i)
if not (l[-1] == 10 and backend_name == 'ibmq_toronto' and n == 12 and start_date.month==4 and start_date.day==26):
damping_pert_readout_i, d_damping_pert_readout_i = damping_est_pert(job,readout_mitigate=True)
damping_pert_raw_i, d_damping_pert_raw_i = damping_est_pert(job,readout_mitigate=False)
elif l[-1] == 10:
damping_pert_readout_i = np.nan
d_damping_pert_readout_i = np.nan
damping_pert_raw_i = np.nan
d_damping_pert_raw_i = np.nan
damping_pert_readout_calibrate_i = np.nan
d_damping_pert_readout_calibrate_i = np.nan
damping_pert_raw.append(damping_pert_raw_i)
d_damping_pert_raw.append(d_damping_pert_raw_i)
damping_pert_readout.append(damping_pert_readout_i)
d_damping_pert_readout.append(d_damping_pert_readout_i)
if readout_calibrate and not (l[-1] == 10 and backend_name == 'ibmq_toronto' and n == 12 and start_date.month==4 and start_date.day==26):
damping_pert_readout_calibrate_i, d_damping_pert_readout_calibrate_i = damping_est_pert(job,True,readout_calibrate_job)
if readout_calibrate:
damping_pert_readout_calibrate.append(damping_pert_readout_calibrate_i)
d_damping_pert_readout_calibrate.append(d_damping_pert_readout_calibrate_i)
zero_calib_job = backend.jobs(limit=1,start_datetime=start_date,end_datetime=end_date,job_tags=['zero_theta_calibration', 'l = '+str(l[-1]),'n = '+str(n)],job_tags_operator='AND',status='DONE')[0]
damping_zero_fid_raw_i, d_damping_zero_fid_raw_i = damping_from_zero_theta_fidelity(zero_calib_job,False,None)
damping_zero_fid_readout_i, d_damping_zero_fid_readout_i = damping_from_zero_theta_fidelity(zero_calib_job,True,None)
if readout_calibrate:
damping_zero_fid_readout_calibrate_i, d_damping_zero_fid_readout_calibrate_i = damping_from_zero_theta_fidelity(zero_calib_job,True,readout_calibrate_job)
damping_zero_fid_readout_calibrate.append(damping_zero_fid_readout_calibrate_i)
d_damping_zero_fid_readout_calibrate.append(d_damping_zero_fid_readout_calibrate_i)
damping_zero_fid_raw.append(damping_zero_fid_raw_i)
d_damping_zero_fid_raw.append(d_damping_zero_fid_raw_i)
damping_zero_fid_readout.append(damping_zero_fid_readout_i)
d_damping_zero_fid_readout.append(d_damping_zero_fid_readout_i)
damping_zero_energy_raw_i, d_damping_zero_energy_raw_i = damping_from_zero_theta_energy(zero_calib_job,hx,hz,False,[])
damping_zero_energy_readout_i, d_damping_zero_energy_readout_i = damping_from_zero_theta_energy(zero_calib_job,hx,hz,True,[])
if readout_calibrate:
damping_zero_energy_readout_calibrate_i, d_damping_zero_energy_readout_calibrate_i = damping_from_zero_theta_energy(zero_calib_job,hx,hz,True,readout_calibrate_job)
damping_zero_energy_readout_calibrate.append(damping_zero_energy_readout_calibrate_i)
d_damping_zero_energy_readout_calibrate.append(d_damping_zero_energy_readout_calibrate_i)
damping_zero_energy_raw.append(damping_zero_energy_raw_i)
d_damping_zero_energy_raw.append(d_damping_zero_energy_raw_i)
damping_zero_energy_readout.append(damping_zero_energy_readout_i)
d_damping_zero_energy_readout.append(d_damping_zero_energy_readout_i)
        l = np.array(l,dtype=int)
np.savetxt(save_dir+'l.csv',l)
np.savetxt(save_dir+'damping_readout.csv',damping_readout)
np.savetxt(save_dir+'d_damping_readout.csv',d_damping_readout)
np.savetxt(save_dir+'damping_readout_calibrate.csv',damping_readout_calibrate)
np.savetxt(save_dir+'d_damping_readout_calibrate.csv',d_damping_readout_calibrate)
np.savetxt(save_dir+'damping_raw.csv',damping_raw)
np.savetxt(save_dir+'d_damping_raw.csv',d_damping_raw)
np.savetxt(save_dir+'damping_pert_readout.csv',damping_pert_readout)
np.savetxt(save_dir+'d_damping_pert_readout.csv',d_damping_pert_readout)
np.savetxt(save_dir+'damping_pert_raw.csv',damping_pert_raw)
np.savetxt(save_dir+'d_damping_pert_raw.csv',d_damping_pert_raw)
np.savetxt(save_dir+'damping_pert_readout_calibrate.csv',damping_pert_readout_calibrate)
np.savetxt(save_dir+'d_damping_pert_readout_calibrate.csv',d_damping_pert_readout_calibrate)
np.savetxt(save_dir+'damping_zero_fid_readout.csv',damping_zero_fid_readout)
np.savetxt(save_dir+'d_damping_zero_fid_readout.csv',d_damping_zero_fid_readout)
np.savetxt(save_dir+'damping_zero_fid_raw.csv',damping_zero_fid_raw)
np.savetxt(save_dir+'d_damping_zero_fid_raw.csv',d_damping_zero_fid_raw)
| np.savetxt(save_dir+'damping_zero_fid_readout_calibrate.csv',damping_zero_fid_readout_calibrate) | numpy.savetxt |
# camera-ready
import torch
import torch.utils.data
import numpy as np
import cv2
import os
import pickle
import glob
import time
default_path = os.path.dirname(os.path.abspath(__file__))
apolloscape_data_path = os.path.join(default_path,'data/apolloscapes')
train_data_path_file = os.path.join(apolloscape_data_path,'train_data_path.pkl')
eval_data_path_file = os.path.join(apolloscape_data_path,'eval_data_path.pkl')
train_data_path = []
eval_data_path = []
with open(train_data_path_file, 'rb') as f:
while True:
try:
data = pickle.load(f)
except EOFError:
break
train_data_path.append(data)
with open(eval_data_path_file, 'rb') as f:
while True:
try:
data = pickle.load(f)
except EOFError:
break
eval_data_path.append(data)
class DatasetTrain(torch.utils.data.Dataset):
def __init__(self):
self.img_h = 560
self.img_w = 1280
self.examples = []
for train_dir in train_data_path:
file_dir = os.path.join(train_dir,"*.png")
file_list = glob.glob(file_dir)
for file_path in file_list:
img_path = file_path.replace('Labels_', 'ColorImage_resize_')
img_path = img_path.replace('Label','ColorImage')
img_path = img_path.replace('_bin.png','.jpg')
label_path = file_path.replace('Labels_', 'Trainid_')
if os.path.exists(img_path) and os.path.exists(label_path):
example = {}
example["img_path"] = img_path
example["label_img_path"] = label_path
self.examples.append(example)
self.num_examples = len(self.examples)
def __getitem__(self, index):
example = self.examples[index]
img_path = example["img_path"]
        img = cv2.imread(img_path, -1) # (shape: (560, 1280, 3))
label_img_path = example["label_img_path"]
label_img = cv2.imread(label_img_path, -1) # (shape: (560, 1280))
        tic = time.perf_counter()
########################################################################
# randomly scale the img and the label:
########################################################################
scale = np.random.uniform(low=0.7, high=1.5)
new_img_h = int(scale*self.img_h)
new_img_w = int(scale*self.img_w)
# resize img without interpolation (want the image to still match
# label_img, which we resize below):
img = cv2.resize(img, (new_img_w, new_img_h),
interpolation=cv2.INTER_NEAREST) # (shape: (new_img_h, new_img_w, 3))
# resize label_img without interpolation (want the resulting image to
# still only contain pixel values corresponding to an object class):
label_img = cv2.resize(label_img, (new_img_w, new_img_h),
interpolation=cv2.INTER_NEAREST) # (shape: (new_img_h, new_img_w))
########################################################################
# # # # # # # # debug visualization START
# print (scale)
# print (new_img_h)
# print (new_img_w)
#
# cv2.imshow("test", img)
# cv2.waitKey(0)
#
# cv2.imshow("test", label_img)
# cv2.waitKey(0)
# # # # # # # # debug visualization END
########################################################################
# select a 256x256 random crop from the img and label:
########################################################################
start_x = np.random.randint(low=0, high=(new_img_w - 256))
end_x = start_x + 256
start_y = np.random.randint(low=0, high=(new_img_h - 256))
end_y = start_y + 256
img = img[start_y:end_y, start_x:end_x] # (shape: (256, 256, 3))
label_img = label_img[start_y:end_y, start_x:end_x] # (shape: (256, 256))
########################################################################
# flip the img and the label with 0.5 probability:
flip = np.random.randint(low=0, high=2)
if flip == 1:
img = cv2.flip(img, 1)
label_img = cv2.flip(label_img, 1)
# brightness augmentation
factor = 0.5
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
hsv = np.array(hsv, dtype=np.float64)
hsv[:, :, 2] = hsv[:, :, 2] * (factor + np.random.uniform()) #scale channel V uniformly
hsv[:, :, 2][hsv[:, :, 2] > 255] = 255 #reset out of range values
img = cv2.cvtColor(np.array(hsv, dtype=np.uint8), cv2.COLOR_HSV2RGB)
# # # # # # # # debug visualization START
# print (img.shape)
# print (label_img.shape)
#
# cv2.imshow("test", img)
# cv2.waitKey(0)
#
# cv2.imshow("test", label_img)
# cv2.waitKey(0)
# # # # # # # # debug visualization END
# normalize the img (with the mean and std for the pretrained ResNet):
img = img/255.0
img = img - np.array([0.485, 0.456, 0.406])
img = img/np.array([0.229, 0.224, 0.225]) # (shape: (256, 256, 3))
img = np.transpose(img, (2, 0, 1)) # (shape: (3, 256, 256))
img = img.astype(np.float32)
# convert numpy -> torch:
img = torch.from_numpy(img) # (shape: (3, 256, 256))
label_img = torch.from_numpy(label_img) # (shape: (256, 256))
        toc = time.perf_counter()
print("time: " , str(toc-tic))
return (img, label_img)
def __len__(self):
return self.num_examples
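# Example usage (a sketch; batch size and number of workers are illustrative):
#   train_dataset = DatasetTrain()
#   train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
#                                              batch_size=8, shuffle=True, num_workers=4)
#   for imgs, label_imgs in train_loader:
#       pass  # imgs: (8, 3, 256, 256) float tensors, label_imgs: (8, 256, 256) label maps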
class DatasetVal(torch.utils.data.Dataset):
def __init__(self):
self.img_h = 560
self.img_w = 1280
self.examples = []
for eval_dir in eval_data_path:
file_dir = os.path.join(eval_dir,"*.png")
file_list = sorted(glob.glob(file_dir))
for file_path in file_list:
img_path = file_path.replace('Labels_', 'ColorImage_resize_')
img_path = img_path.replace('Label','ColorImage')
img_path = img_path.replace('_bin.png','.jpg')
label_path = file_path.replace('Labels_', 'Trainid_')
if os.path.exists(img_path) and os.path.exists(label_path):
example = {}
example["img_path"] = img_path
example["label_img_path"] = label_path
self.examples.append(example)
self.num_examples = len(self.examples)
def __getitem__(self, index):
example = self.examples[index]
img_path = example["img_path"]
#print(img_path)
img = cv2.imread(img_path, -1) # (shape: (560, 1280, 3))
label_img_path = example["label_img_path"]
label_img = cv2.imread(label_img_path, -1) # (shape: (560, 1280))
# # # # # # # # debug visualization START
# cv2.imshow("test", img)
# cv2.waitKey(0)
#
# cv2.imshow("test", label_img)
# cv2.waitKey(0)
# # # # # # # # debug visualization END
# normalize the img (with the mean and std for the pretrained ResNet):
img = img/255.0
img = img - np.array([0.485, 0.456, 0.406])
img = img/np.array([0.229, 0.224, 0.225]) # (shape: (560, 1280, 3))
img = | np.transpose(img, (2, 0, 1)) | numpy.transpose |
#! /usr/bin/env python3
import numpy as np
import argparse
from collections import Counter
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import eigsh
from scipy.stats import kstest as KS
from sklearn.preprocessing import scale
from scipy.stats import chi2, norm
###########################################################
## Reproduces results in Section 5.5 - Undirected graphs ##
###########################################################
## Takes a vector and returns its spherical coordinates
def cart_to_sphere(x):
## theta_1
q = np.arccos(x[1] / np.linalg.norm(x[:2]))
sphere_coord = [q] if x[0] >= 0 else [2*np.pi - q]
## Loop for theta_2, ..., theta_m-1
for j in range(2,len(x)):
sphere_coord += [2 * np.arccos(x[j] / np.linalg.norm(x[:(j+1)]))]
## Return the result in a numpy array
return np.array(sphere_coord)
## Takes a matrix and returns the spherical coordinates obtained along the given axis
def theta_transform(X,axis=1):
## Apply the function theta_transform along the axis
return np.apply_along_axis(func1d=cart_to_sphere, axis=axis, arr=X)
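## For example (illustrative): a d-dimensional embedding maps to d-1 angles, so for
## X of shape (n, d), theta_transform(X) returns an array of shape (n, d-1):
# X = np.random.normal(size=(5, 3))
# theta = theta_transform(X) # shape (5, 2)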
## Arguments
n = 1500
M = 250
K = 3
m = 10
## Set seed to repeat the simulation
np.random.seed(171171)
mu = np.array([0.7,0.4,0.1,0.1,0.1,0.5,0.4,0.8,-0.1]).reshape(3,3)
B = np.dot(mu,mu.T)
rho = np.random.beta(a=2,b=1,size=n)
q = np.array([int(x) for x in np.linspace(0,n,num=K,endpoint=False)])
z = np.zeros(n,dtype=int)
for k in range(K):
z[q[k]:] = k
## Effective dimension in theta
p = K - 1
## Define the arrays
skew_pvals = []
kurt_pvals = []
skew_pvals_tilde = []
kurt_pvals_tilde = []
## Repeat M times
for s in range(M):
print('\rIteration: '+str(s),end='')
## Construct the adjacency matrix
rows = []
cols = []
for i in range(n-1):
for j in range(i+1,n):
if np.random.binomial(n=1,p=rho[i]*rho[j]*B[z[i],z[j]],size=1) == 1:
rows += [i,j]
cols += [j,i]
## Obtain the adjacency matrix and the embeddings
A = coo_matrix((np.repeat(1.0,len(rows)),(rows,cols)),shape=(n,n))
S, U = eigsh(A, k=m)
indices = np.argsort(np.abs(S))[::-1]
X = np.dot(U[:,indices], np.diag(np.abs(S[indices]) ** .5))
## Remove empty rows
zero_index = np.array(A.sum(axis=0),dtype=int)[0]
X = X[zero_index > 0]
zz = z[zero_index > 0]
## Calculate the transformations of the embedding
X_tilde = np.divide(X, np.linalg.norm(X,axis=1)[:,np.newaxis])
theta = theta_transform(X)
## Loop over the groups
for k in range(K):
## Number of units in cluster k
nk = np.sum(zz==k)
## Mardia tests for X_tilde
emb_k = X_tilde[zz==k]
emb_k_var = np.linalg.inv(np.cov(emb_k[:,:K].T))
Dk = np.dot(np.dot(scale(emb_k[:,:K],with_std=False),emb_k_var),scale(emb_k[:,:K],with_std=False).T)
## b1 (skewness)
b1 = np.sum(Dk ** 3) / (6*nk)
skew_pvals_tilde += [chi2.logsf(b1, df=K*(K+1)*(K+2)/6)]
## b2 (kurtosis)
        b2 = (np.mean(np.diag(Dk) ** 2) - K*(K+2)*(nk-1)/(nk+1)) / np.sqrt(8*K*(K+2) / nk)
kurt_pvals_tilde += [norm.logsf(b2)]
## Repeat the calculations of the Mardia test for theta
emb_k = theta[zz==k]
emb_k_var = np.linalg.inv(np.cov(emb_k[:,:p].T))
Dk = np.dot(np.dot(scale(emb_k[:,:p],with_std=False),emb_k_var),scale(emb_k[:,:p],with_std=False).T)
## b1 (skewness)
b1 = np.sum(Dk ** 3) / (6*nk)
skew_pvals += [chi2.logsf(b1, df=p*(p+1)*(p+2)/6)]
## b2 (kurtosis)
        b2 = (np.mean(np.diag(Dk) ** 2) - p*(p+2)*(nk-1)/(nk+1)) / np.sqrt(8*p*(p+2) / nk)
        kurt_pvals += [norm.logsf(b2)]
"""
This file contains several helper functions to calculate spectral power from
1D and 2D EEG data.
"""
import mne
import logging
import numpy as np
import pandas as pd
from scipy import signal
from scipy.integrate import simps
from scipy.interpolate import RectBivariateSpline
logger = logging.getLogger('yasa')
__all__ = ['bandpower', 'bandpower_from_psd', 'bandpower_from_psd_ndarray',
'irasa', 'stft_power']
def bandpower(data, sf=None, ch_names=None, hypno=None, include=(2, 3),
win_sec=4, relative=True, bandpass=False,
bands=[(0.5, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 16, 'Sigma'), (16, 30, 'Beta'), (30, 40, 'Gamma')],
kwargs_welch=dict(average='median', window='hamming')):
"""
Calculate the Welch bandpower for each channel and, if specified,
for each sleep stage.
.. versionadded:: 0.1.6
Parameters
----------
data : np.array_like or :py:class:`mne.io.BaseRaw`
1D or 2D EEG data. Can also be a :py:class:`mne.io.BaseRaw`, in which
case ``data``, ``sf``, and ``ch_names`` will be automatically
extracted, and ``data`` will also be converted from Volts (MNE default)
to micro-Volts (YASA).
sf : float
The sampling frequency of data AND the hypnogram.
Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.
ch_names : list
List of channel names, e.g. ['Cz', 'F3', 'F4', ...]. If None,
channels will be labelled ['CHAN000', 'CHAN001', ...].
Can be omitted if ``data`` is a :py:class:`mne.io.BaseRaw`.
hypno : array_like
Sleep stage (hypnogram). If the hypnogram is loaded, the
bandpower will be extracted for each sleep stage defined in
``include``.
The hypnogram must have the exact same number of samples as ``data``.
To upsample your hypnogram, please refer to
:py:func:`yasa.hypno_upsample_to_data`.
.. note::
The default hypnogram format in YASA is a 1D integer
vector where:
- -2 = Unscored
- -1 = Artefact / Movement
- 0 = Wake
- 1 = N1 sleep
- 2 = N2 sleep
- 3 = N3 sleep
- 4 = REM sleep
include : tuple, list or int
Values in ``hypno`` that will be included in the mask. The default is
(2, 3), meaning that the bandpower are sequentially calculated
for N2 and N3 sleep. This has no effect when ``hypno`` is None.
win_sec : int or float
The length of the sliding window, in seconds, used for the Welch PSD
calculation. Ideally, this should be at least two times the inverse of
the lower frequency of interest (e.g. for a lower frequency of interest
of 0.5 Hz, the window length should be at least 2 * 1 / 0.5 =
4 seconds).
relative : boolean
If True, bandpower is divided by the total power between the min and
max frequencies defined in ``band``.
bandpass : boolean
If True, apply a standard FIR bandpass filter using the minimum and
        maximum frequencies in ``bands``. For more details, refer to
:py:func:`mne.filter.filter_data`.
bands : list of tuples
List of frequency bands of interests. Each tuple must contain the
lower and upper frequencies, as well as the band name
(e.g. (0.5, 4, 'Delta')).
kwargs_welch : dict
Optional keywords arguments that are passed to the
:py:func:`scipy.signal.welch` function.
Returns
-------
bandpowers : :py:class:`pandas.DataFrame`
Bandpower dataframe, in which each row is a channel and each column
a spectral band.
Notes
-----
For an example of how to use this function, please refer to
https://github.com/raphaelvallat/yasa/blob/master/notebooks/08_bandpower.ipynb
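    Examples
    --------
    A minimal, illustrative call on a 2D array ``data`` of shape
    *(n_channels, n_samples)* sampled at 100 Hz (argument values are placeholders)::

        bp = bandpower(data, sf=100., ch_names=['Cz', 'Fz'], win_sec=4,
                       relative=True)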
"""
# Type checks
assert isinstance(bands, list), 'bands must be a list of tuple(s)'
assert isinstance(relative, bool), 'relative must be a boolean'
assert isinstance(bandpass, bool), 'bandpass must be a boolean'
# Check if input data is a MNE Raw object
if isinstance(data, mne.io.BaseRaw):
sf = data.info['sfreq'] # Extract sampling frequency
ch_names = data.ch_names # Extract channel names
data = data.get_data() * 1e6 # Convert from V to uV
_, npts = data.shape
else:
# Safety checks
assert isinstance(data, np.ndarray), 'Data must be a numpy array.'
data = | np.atleast_2d(data) | numpy.atleast_2d |
import copy
from functools import partial
import networkx as nx
import numpy as np
from scipy.spatial.distance import cosine
def get_kernel_func(kernel_func_name: str):
if kernel_func_name == 'features':
return partial(features_kernel, measure='cos')
if kernel_func_name == 'features_dot':
return partial(features_kernel, measure='dot')
if kernel_func_name == 'WL':
return WL_kernel
raise ValueError(f'kernel function "{kernel_func_name}" not supported.')
def features_kernel(datapoint1: 'MoleculeDatapoint', datapoint2: 'MoleculeDatapoint', measure: str) -> float:
if measure == 'dot':
return np.dot(datapoint1.features, datapoint2.features)
if measure == 'cos':
return 1 - cosine(datapoint1.features, datapoint2.features)
raise ValueError(f'measure "{measure}" not supported.')
def WL_kernel(datapoint1: 'MoleculeDatapoint', datapoint2: 'MoleculeDatapoint') -> float:
for d in [datapoint1, datapoint2]:
if not hasattr(d, 'networkx_graph'):
d.networkx_graph = networkx_graph(d)
# could fiddle with h (depth) later
return GK_WL().compare(datapoint1.networkx_graph, datapoint2.networkx_graph, h=3, node_label=True)
def networkx_graph(d: 'MoleculeDatapoint') -> nx.Graph:
G = nx.Graph()
for atom in d.mol.GetAtoms():
G.add_node(atom.GetIdx(), node_label=str(atom.GetAtomicNum()))
for bond in d.mol.GetBonds():
a1 = bond.GetBeginAtom().GetIdx()
a2 = bond.GetEndAtom().GetIdx()
if a1 < a2:
G.add_edge(a1, a2)
return G
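# Example usage (a sketch; assumes RDKit is installed and that each datapoint exposes an
# RDKit molecule as `.mol`, as MoleculeDatapoint does; the helper name is illustrative):
def _example_wl_kernel():
    from types import SimpleNamespace
    from rdkit import Chem
    d1 = SimpleNamespace(mol=Chem.MolFromSmiles('CCO'))
    d2 = SimpleNamespace(mol=Chem.MolFromSmiles('CCN'))
    return WL_kernel(d1, d2)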
# following is from https://github.com/emanuele/jstsp2015/blob/master/gk_weisfeiler_lehman.py
"""Weisfeiler_Lehman graph kernel.
Python implementation based on: "Weisfeiler-Lehman Graph Kernels", by:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, JMLR, 2012.
http://jmlr.csail.mit.edu/papers/v12/shervashidze11a.html
Author : <NAME>, <NAME>
"""
class GK_WL():
"""
Weisfeiler_Lehman graph kernel.
"""
def compare_list(self, graph_list, h=1, node_label=True):
"""Compute the all-pairs kernel values for a list of graphs.
This function can be used to directly compute the kernel
matrix for a list of graphs. The direct computation of the
kernel matrix is faster than the computation of all individual
pairwise kernel values.
Parameters
----------
graph_list: list
A list of graphs (list of networkx graphs)
        h : integer
Number of iterations.
node_label : boolean
Whether to use original node labels. True for using node labels
saved in the attribute 'node_label'. False for using the node
degree of each node as node attribute.
Return
------
K: numpy.array, shape = (len(graph_list), len(graph_list))
The similarity matrix of all graphs in graph_list.
"""
self.graphs = graph_list
n = len(graph_list)
lists = [0] * n
k = [0] * (h + 1)
n_nodes = 0
n_max = 0
# Compute adjacency lists and n_nodes, the total number of
# nodes in the dataset.
for i in range(n):
adjacency = graph_list[i].adjacency()
lists[i] = []
for _, entry in adjacency:
lists[i].append(list(entry.keys()))
n_nodes = n_nodes + graph_list[i].number_of_nodes()
# Computing the maximum number of nodes in the graphs. It
# will be used in the computation of vectorial
# representation.
if(n_max < graph_list[i].number_of_nodes()):
n_max = graph_list[i].number_of_nodes()
phi = np.zeros((n_max, n), dtype=np.uint64)
# INITIALIZATION: initialize the nodes labels for each graph
# with their labels or with degrees (for unlabeled graphs)
labels = [0] * n
label_lookup = {}
label_counter = 0
# label_lookup is an associative array, which will contain the
# mapping from multiset labels (strings) to short labels
# (integers)
if node_label is True:
for i in range(n):
l_aux = list(nx.get_node_attributes(graph_list[i],
'node_label').values())
# It is assumed that the graph has an attribute
# 'node_label'
labels[i] = np.zeros(len(l_aux), dtype=np.int32)
for j in range(len(l_aux)):
if not (l_aux[j] in label_lookup):
label_lookup[l_aux[j]] = label_counter
labels[i][j] = label_counter
label_counter += 1
else:
labels[i][j] = label_lookup[l_aux[j]]
# labels are associated to a natural number
# starting with 0.
phi[labels[i][j], i] += 1
else:
for i in range(n):
                labels[i] = np.array([deg for _, deg in graph_list[i].degree()])
for j in range(len(labels[i])):
phi[labels[i][j], i] += 1
# Simplified vectorial representation of graphs (just taking
# the vectors before the kernel iterations), i.e., it is just
# the original nodes degree.
self.vectors = np.copy(phi.transpose())
k = np.dot(phi.transpose(), phi)
# MAIN LOOP
it = 0
new_labels = copy.deepcopy(labels)
while it < h:
# create an empty lookup table
label_lookup = {}
label_counter = 0
phi = np.zeros((n_nodes, n))
for i in range(n):
for v in range(len(lists[i])):
# form a multiset label of the node v of the i'th graph
# and convert it to a string
long_label = np.concatenate((np.array([labels[i][v]]),
np.sort(labels[i]
[lists[i][v]])))
long_label_string = str(long_label)
# if the multiset label has not yet occurred, add it to the
# lookup table and assign a number to it
if not (long_label_string in label_lookup):
label_lookup[long_label_string] = label_counter
new_labels[i][v] = label_counter
label_counter += 1
else:
new_labels[i][v] = label_lookup[long_label_string]
# fill the column for i'th graph in phi
aux = np.bincount(new_labels[i])
phi[new_labels[i], i] += aux[new_labels[i]]
phi = phi.astype(np.uint64)
k += np.dot(phi.transpose(), phi)
labels = copy.deepcopy(new_labels)
it = it + 1
# Compute the normalized version of the kernel
k_norm = | np.zeros(k.shape) | numpy.zeros |
# -*- coding: utf-8 -*-
from __future__ import print_function
"""Main module."""
import numpy as np
import itertools as it
import scipy.stats as sps
import scipy.linalg as sl
import os, pickle
from astropy import units as u
import hasasia
from .utils import create_design_matrix
current_path = os.path.abspath(hasasia.__path__[0])
sc_dir = os.path.join(current_path,'sensitivity_curves/')
__all__ =['GWBSensitivityCurve',
'DeterSensitivityCurve',
'Pulsar',
'Spectrum',
'R_matrix',
'G_matrix',
'get_Tf',
'get_NcalInv',
'resid_response',
'HellingsDownsCoeff',
'get_Tspan',
'get_TspanIJ',
'corr_from_psd',
'quantize_fast',
'red_noise_powerlaw',
'Agwb_from_Seff_plaw',
'PI_hc',
'nanograv_11yr_stoch',
'nanograv_11yr_deter',
]
## Some constants
yr_sec = 365.25*24*3600
fyr = 1/yr_sec
def R_matrix(designmatrix, N):
"""
Create R matrix as defined in Ellis et al (2013)
and Demorest et al (2012)
Parameters
----------
designmatrix : array
Design matrix of timing model.
N : array
TOA uncertainties [s]
Returns
-------
R matrix
"""
M = designmatrix
n,m = M.shape
L = np.linalg.cholesky(N)
Linv = np.linalg.inv(L)
U,s,_ = np.linalg.svd(np.matmul(Linv,M), full_matrices=True)
Id = np.eye(M.shape[0])
S = np.zeros_like(M)
S[:m,:m] = np.diag(s)
inner = np.linalg.inv(np.matmul(S.T,S))
outer = np.matmul(S,np.matmul(inner,S.T))
return Id - np.matmul(L,np.matmul(np.matmul(U,outer),np.matmul(U.T,Linv)))
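# Illustrative sketch (not part of the original module): R removes the
# timing-model fit from the residuals, so applying it to the design matrix
# itself should give ~0. The toy quadratic design matrix, 100 ns TOA errors
# and 5 yr span below are made-up values for the demonstration only.
def _example_r_matrix():
    """Build R for a tiny quadratic timing model and check R @ M ~ 0."""
    toas = np.linspace(0, 5 * yr_sec, 50)
    t = toas / toas.max()                        # scaled columns for conditioning
    M = np.vstack([np.ones_like(t), t, t**2]).T  # offset, spin, spindown
    N = np.diag((1e-7 * np.ones_like(t))**2)     # 100 ns white TOA errors
    R = R_matrix(M, N)
    assert np.allclose(np.matmul(R, M), 0.0, atol=1e-8)
    return R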
def G_matrix(designmatrix):
"""
Create G matrix as defined in van Haasteren 2013
Parameters
----------
designmatrix : array
Design matrix for a pulsar timing model.
Returns
-------
G matrix
"""
M = designmatrix
n , m = M.shape
U, _ , _ = np.linalg.svd(M, full_matrices=True)
return U[:,m:]
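# Illustrative sketch (demonstration only): G spans the null space of the
# design matrix, so it has shape (n, n - m), orthonormal columns, and
# satisfies G.T @ M ~ 0. The two-parameter design matrix is made up.
def _example_g_matrix():
    toas = np.linspace(0, 1, 30)
    M = np.vstack([np.ones_like(toas), toas]).T   # offset + slope, 30 x 2
    G = G_matrix(M)
    assert G.shape == (30, 28)
    assert np.allclose(np.matmul(G.T, M), 0.0, atol=1e-12)
    assert np.allclose(np.matmul(G.T, G), np.eye(28), atol=1e-12)
    return G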
def get_Tf(designmatrix, toas, N=None, nf=200, fmin=None, fmax=2e-7,
freqs=None, exact_astro_freqs = False, from_G=True, twofreqs=False):
"""
Calculate the transmission function for a given pulsar design matrix, TOAs
and TOA errors.
Parameters
----------
designmatrix : array
Design matrix for a pulsar timing model, N_TOA x N_param.
toas : array
Times-of-arrival for pulsar, N_TOA long.
N : array
        Covariance matrix for pulsar times-of-arrival, N_TOA x N_TOA. Often just
        a diagonal matrix of TOA errors squared.
nf : int, optional
Number of frequencies at which to calculate transmission function.
fmin : float, optional
Minimum frequency at which to calculate transmission function.
fmax : float, optional
Maximum frequency at which to calculate transmission function.
exact_astro_freqs : bool, optional
Whether to use exact 1/year and 2/year frequency values in calculation.
from_G : bool, optional
        Whether to use the G matrix for the transmission function calculation.
        If False the R-matrix is used.
"""
if not from_G and N is None:
err_msg = 'Covariance Matrix must be provided if constructing'
err_msg += ' from R-matrix.'
raise ValueError(err_msg)
M = designmatrix
N_TOA = M.shape[0]
## Prep Correlation
t1, t2 = np.meshgrid(toas, toas)
tm = np.abs(t1-t2)
# make filter
T = toas.max()-toas.min()
f0 = 1 / T
if freqs is None:
if fmin is None:
fmin = f0/5
ff = np.logspace(np.log10(fmin), np.log10(fmax), nf,dtype='float128')
if exact_astro_freqs:
ff = np.sort(np.append(ff,[fyr,2*fyr]))
nf +=2
else:
nf = len(freqs)
ff = freqs
Tmat = np.zeros(nf, dtype='float64')
if from_G:
G = G_matrix(M)
m = G.shape[1]
Gtilde = np.zeros((ff.size,G.shape[1]),dtype='complex128')
Gtilde = np.dot(np.exp(1j*2*np.pi*ff[:,np.newaxis]*toas),G)
Tmat = np.matmul(np.conjugate(Gtilde),Gtilde.T)/N_TOA
if twofreqs:
Tmat = np.real(Tmat)
else:
Tmat = np.real(np.diag(Tmat))
else:
R = R_matrix(M, N)
for ct, f in enumerate(ff):
Tmat[ct] = np.real(np.sum(np.exp(1j*2*np.pi*f*tm)*R)/N_TOA)
return np.real(Tmat), ff, T
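# Illustrative sketch (assumed toy inputs: weekly cadence, 5 yr span, and an
# offset+spin+spindown design matrix). The transmission function should be
# close to 1 well above 1/Tspan and strongly suppressed below it, where the
# quadratic fit absorbs the signal.
def _example_get_Tf():
    toas = np.arange(0, 5 * yr_sec, 7 * 24 * 3600)
    t = toas / toas.max()
    M = np.vstack([np.ones_like(t), t, t**2]).T
    Tf, ff, Tspan = get_Tf(M, toas, nf=100, from_G=True)
    assert Tf[0] < 0.1    # lowest frequencies are absorbed by the fit
    assert Tf[-1] > 0.9   # high frequencies pass through
    return Tf, ff, Tspan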
def get_NcalInv(psr, nf=200, fmin=None, fmax=2e-7, freqs=None,
exact_yr_freqs = False, full_matrix=False,
return_Gtilde_Ncal=False, tm_fit=True):
r"""
    Calculate the inverse-noise-weighted transmission function for a given
pulsar. This calculates
:math:`\mathcal{N}^{-1}(f,f') , \; \mathcal{N}^{-1}(f)`
in `[1]`_, see Equations (19-20).
.. _[1]: https://arxiv.org/abs/1907.04341
Parameters
----------
psr : array
Pulsar object.
nf : int, optional
Number of frequencies at which to calculate transmission function.
fmin : float, optional
Minimum frequency at which to calculate transmission function.
fmax : float, optional
Maximum frequency at which to calculate transmission function.
exact_yr_freqs : bool, optional
Whether to use exact 1/year and 2/year frequency values in calculation.
Returns
-------
inverse-noise-weighted transmission function
"""
toas = psr.toas
# make filter
T = toas.max()-toas.min()
f0 = 1 / T
if freqs is None:
if fmin is None:
fmin = f0/5
ff = np.logspace(np.log10(fmin), np.log10(fmax), nf,dtype='float128')
if exact_yr_freqs:
ff = np.sort(np.append(ff,[fyr,2*fyr]))
nf +=2
else:
nf = len(freqs)
ff = freqs
if tm_fit:
G = G_matrix(psr.designmatrix)
else:
G = np.eye(toas.size)
Gtilde = np.zeros((ff.size,G.shape[1]),dtype='complex128')
#N_freqs x N_TOA-N_par
# Note we do not include factors of NTOA or Timespan as they cancel
# with the definition of Ncal
Gtilde = np.dot(np.exp(1j*2*np.pi*ff[:,np.newaxis]*toas),G)
# N_freq x N_TOA-N_par
Ncal = np.matmul(G.T,np.matmul(psr.N,G)) #N_TOA-N_par x N_TOA-N_par
NcalInv = np.linalg.inv(Ncal) #N_TOA-N_par x N_TOA-N_par
TfN = np.matmul(np.conjugate(Gtilde),np.matmul(NcalInv,Gtilde.T)) / 2
if return_Gtilde_Ncal:
return np.real(TfN), Gtilde, Ncal
elif full_matrix:
return np.real(TfN)
else:
return np.real(np.diag(TfN)) / get_Tspan([psr])
def resid_response(freqs):
r"""
    Returns the timing residual response function for a pulsar across a set of
frequencies. See Equation (53) in `[1]`_.
.. math::
\mathcal{R}(f)=\frac{1}{12\pi^2\;f^2}
.. _[1]: https://arxiv.org/abs/1907.04341
"""
return 1/(12 * np.pi**2 * freqs**2)
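# Small sanity check (illustrative only): the residual response scales as
# f^-2, so lowering the frequency by a factor of 10 raises the response by
# a factor of 100.
def _example_resid_response():
    r = resid_response(np.array([fyr, fyr / 10]))
    assert np.isclose(r[1] / r[0], 100.0)
    return r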
class Pulsar(object):
"""
Class to encode information about individual pulsars.
Parameters
----------
toas : array
Pulsar Times of Arrival [sec].
toaerrs : array
Pulsar TOA errors [sec].
phi : float
Ecliptic longitude of pulsar [rad].
theta : float
Ecliptic latitude of pulsar [rad].
designmatrix : array
Design matrix for pulsar's timing model. N_TOA x N_param.
N : array
Covariance matrix for the pulsar. N_TOA x N_TOA. Made from toaerrs
if not provided.
pdist : astropy.quantity, float
Earth-pulsar distance. Default units is kpc.
"""
def __init__(self, toas, toaerrs, phi=None, theta=None,
designmatrix=None, N=None, pdist=1.0*u.kpc):
self.toas = toas
self.toaerrs = toaerrs
self.phi = phi
self.theta = theta
self.pdist = make_quant(pdist,'kpc')
if N is None:
self.N = np.diag(toaerrs**2) #N ==> weights
else:
self.N = N
if designmatrix is None:
self.designmatrix = create_design_matrix(toas, RADEC=True,
PROPER=True, PX=True)
else:
self.designmatrix = designmatrix
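# Illustrative sketch: building a Pulsar from synthetic TOAs. The fortnightly
# cadence, 100 ns errors and sky position are made-up values; the covariance
# matrix and design matrix are filled in by the class defaults.
def _example_make_pulsar():
    toas = np.arange(0, 10 * yr_sec, 14 * 24 * 3600)
    toaerrs = 1e-7 * np.ones_like(toas)
    return Pulsar(toas, toaerrs, phi=1.0, theta=0.5)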
class Spectrum(object):
"""Class to encode the spectral information for a single pulsar.
Parameters
----------
psr : `hasasia.Pulsar`
A `hasasia.Pulsar` instance.
nf : int, optional
Number of frequencies over which to build the various spectral
densities.
fmin : float, optional [Hz]
Minimum frequency over which to build the various spectral
densities. Defaults to the timespan/5 of the pulsar.
fmax : float, optional [Hz]
        Maximum frequency over which to build the various spectral
        densities.
freqs : array, optional [Hz]
Optionally supply an array of frequencies over which to build the
various spectral densities.
"""
def __init__(self, psr, nf=400, fmin=None, fmax=2e-7,
freqs=None, tm_fit=True, **Tf_kwargs):
self._H_0 = 72 * u.km / u.s / u.Mpc
self.toas = psr.toas
self.toaerrs = psr.toaerrs
self.phi = psr.phi
self.theta = psr.theta
self.N = psr.N
self.designmatrix = psr.designmatrix
self.pdist = psr.pdist
self.tm_fit = tm_fit
self.Tf_kwargs = Tf_kwargs
if freqs is None:
f0 = 1 / get_Tspan([psr])
if fmin is None:
fmin = f0/5
self.freqs = np.logspace(np.log10(fmin), np.log10(fmax), nf)
else:
self.freqs = freqs
self._psd_prefit = np.zeros_like(self.freqs)
@property
def psd_postfit(self):
"""Postfit Residual Power Spectral Density"""
if not hasattr(self, '_psd_postfit'):
self._psd_postfit = self.psd_prefit * self.NcalInv
return self._psd_postfit
@property
def psd_prefit(self):
"""Prefit Residual Power Spectral Density"""
if np.all(self._psd_prefit==0):
raise ValueError('Must set Prefit Residual Power Spectral Density.')
# print('No Prefit Residual Power Spectral Density set.\n'
# 'Setting psd_prefit to harmonic mean of toaerrs.')
# sigma = sps.hmean(self.toaerrs)
# dt = 14*24*3600 # 2 Week Cadence
# self.add_white_noise_pow(sigma=sigma,dt=dt)
return self._psd_prefit
@property
def Tf(self):
if not hasattr(self, '_Tf'):
self._Tf,_,_ = get_Tf(designmatrix=self.designmatrix,
toas=self.toas, N=self.N,
freqs=self.freqs, from_G=True,
**self.Tf_kwargs)
return self._Tf
@property
def NcalInv(self):
"""Inverse Noise Weighted Transmission Function."""
if not hasattr(self, '_NcalInv'):
self._NcalInv = get_NcalInv(psr=self, freqs=self.freqs,
tm_fit=self.tm_fit)
return self._NcalInv
@property
def S_I(self):
r"""Strain power sensitivity for this pulsar. Equation (74) in `[1]`_
.. math::
S_I=\frac{1}{\mathcal{N}^{-1}\;\mathcal{R}}
.. _[1]: https://arxiv.org/abs/1907.04341
"""
if not hasattr(self, '_S_I'):
self._S_I = 1/resid_response(self.freqs)/self.NcalInv
return self._S_I
@property
def S_R(self):
r"""Residual power sensitivity for this pulsar.
.. math::
S_R=\frac{1}{\mathcal{N}^{-1}}
"""
if not hasattr(self, '_S_R'):
self._S_R = 1/self.NcalInv
return self._S_R
@property
def h_c(self):
r"""Characteristic strain sensitivity for this pulsar.
.. math::
h_c=\sqrt{f\;S_I}
"""
if not hasattr(self, '_h_c'):
self._h_c = np.sqrt(self.freqs * self.S_I)
return self._h_c
@property
def Omega_gw(self):
r"""Energy Density sensitivity.
.. math::
\Omega_{gw}=\frac{2\pi^2}{3\;H_0^2}f^3\;S_I
"""
self._Omega_gw = ((2*np.pi**2/3) * self.freqs**3 * self.S_I
/ self._H_0.to('Hz').value**2)
return self._Omega_gw
def add_white_noise_power(self, sigma=None, dt=None, vals=False):
r"""
        Add white noise power to the prefit residual power spectral density.
**Note:** All noise information is furnished by the covariance matrix in
the `hasasia.Pulsar` object, this is simply useful for bookkeeping and
plots.
Parameters
----------
sigma : float
TOA error.
dt : float
Time between observing epochs in [seconds].
vals : bool
Whether to return the psd values as an array. Otherwise just added
to `self.psd_prefit`.
"""
white_noise = 2.0 * dt * (sigma)**2 * np.ones_like(self.freqs)
self._psd_prefit += white_noise
if vals:
return white_noise
def add_red_noise_power(self, A=None, gamma=None, vals=False):
r"""
Add power law red noise to the prefit residual power spectral density.
        As :math:`P=\frac{A^2}{12\pi^2}(f/f_{yr})^{-\gamma}\,\mathrm{yr}^3`.
**Note:** All noise information is furnished by the covariance matrix in
the `hasasia.Pulsar` object, this is simply useful for bookkeeping and
plots.
Parameters
----------
A : float
Amplitude of red noise.
gamma : float
Spectral index of red noise powerlaw.
vals : bool
Whether to return the psd values as an array. Otherwise just added
to `self.psd_prefit`.
"""
ff = self.freqs
red_noise = A**2*(ff/fyr)**(-gamma)/(12*np.pi**2) * yr_sec**3
self._psd_prefit += red_noise
if vals:
return red_noise
def add_noise_power(self,noise):
r"""Add any spectrum of noise. Must match length of frequency array.
**Note:** All noise information is furnished by the covariance matrix in
the `hasasia.Pulsar` object, this is simply useful for bookkeeping and
plots.
"""
self._psd_prefit += noise
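# Illustrative sketch (arbitrary values): the Spectrum object derives the
# single-pulsar sensitivity from the Pulsar covariance matrix, while the
# add_*_noise_power helpers only book-keep the prefit PSD for plotting.
def _example_spectrum():
    toas = np.arange(0, 10 * yr_sec, 14 * 24 * 3600)
    toaerrs = 1e-7 * np.ones_like(toas)
    psr = Pulsar(toas, toaerrs, phi=1.0, theta=0.5)
    spec = Spectrum(psr, nf=200)
    spec.add_white_noise_power(sigma=1e-7, dt=14 * 24 * 3600)
    spec.add_red_noise_power(A=1e-15, gamma=13. / 3)
    return spec.freqs, spec.h_c   # characteristic strain sensitivity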
class SensitivityCurve(object):
r"""
Base class for constructing PTA sensitivity curves. Takes a list of
`hasasia.Spectrum` objects as input.
"""
def __init__(self, spectra):
if not isinstance(spectra, list):
raise ValueError('Must provide list of spectra!!')
self._H_0 = 72 * u.km / u.s / u.Mpc
self.Npsrs = len(spectra)
self.phis = np.array([p.phi for p in spectra])
self.thetas = np.array([p.theta for p in spectra])
self.Tspan = get_Tspan(spectra)
# f0 = 1 / self.Tspan
# if fmin is None:
# fmin = f0/5
#Check to see if all frequencies are equal.
freq_check = [sp.freqs for sp in spectra]
if np.all(freq_check == spectra[0].freqs):
self.freqs = spectra[0].freqs
else:
raise ValueError('All frequency arrays must match for sensitivity'
' curve calculation!!')
self.SnI = np.array([sp.S_I for sp in spectra])
def to_pickle(self, filepath):
self.filepath = filepath
with open(filepath, "wb") as fout:
pickle.dump(self, fout)
@property
def S_eff(self):
"""Strain power sensitivity. """
        raise NotImplementedError('Effective Strain Power Sensitivity '
                                  'method must be defined.')
@property
def h_c(self):
"""Characteristic strain sensitivity"""
if not hasattr(self, '_h_c'):
self._h_c = np.sqrt(self.freqs * self.S_eff)
return self._h_c
@property
def Omega_gw(self):
"""Energy Density sensitivity"""
self._Omega_gw = ((2*np.pi**2/3) * self.freqs**3 * self.S_eff
/ self._H_0.to('Hz').value**2)
return self._Omega_gw
@property
def H_0(self):
"""Hubble Constant. Assumed to be in units of km /(s Mpc) unless
supplied as an `astropy.quantity`. """
self._H_0 = make_quant(self._H_0,'km /(s Mpc)')
return self._H_0
class GWBSensitivityCurve(SensitivityCurve):
r"""
Class to produce a sensitivity curve for a gravitational wave
background, using Hellings-Downs spatial correlations.
Parameters
----------
orf : str, optional {'hd', 'st', 'dipole', 'monopole'}
Overlap reduction function to be used in the sensitivity curve.
        May be Hellings-Downs, Scalar-Tensor, Dipole or Monopole.
"""
def __init__(self, spectra, orf='hd'):
super().__init__(spectra)
if orf == 'hd':
Coff = HellingsDownsCoeff(self.phis, self.thetas)
elif orf == 'st':
Coff = ScalarTensorCoeff(self.phis, self.thetas)
elif orf == 'dipole':
Coff = DipoleCoeff(self.phis, self.thetas)
        elif orf == 'monopole':
            Coff = MonopoleCoeff(self.phis, self.thetas)
        else:
            err_msg = 'Unknown overlap reduction function: {0}'.format(orf)
            raise ValueError(err_msg)
self.ThetaIJ, self.chiIJ, self.pairs, self.chiRSS = Coff
self.T_IJ = np.array([get_TspanIJ(spectra[ii],spectra[jj])
for ii,jj in zip(self.pairs[0],self.pairs[1])])
def SNR(self, Sh):
"""
Calculate the signal-to-noise ratio of a given signal strain power
spectral density, `Sh`. Must match frequency range and `df` of
`self`.
"""
integrand = Sh**2 / self.S_eff**2
return np.sqrt(2.0 * self.Tspan * np.trapz(y=integrand,
x=self.freqs,
axis=0))
@property
def S_eff(self):
"""Strain power sensitivity. """
if not hasattr(self, '_S_eff'):
ii = self.pairs[0]
jj = self.pairs[1]
kk = np.arange(len(self.chiIJ))
num = self.T_IJ[kk] / self.Tspan * self.chiIJ[kk]**2
series = num[:,np.newaxis] / (self.SnI[ii] * self.SnI[jj])
self._S_eff = np.power(np.sum(series, axis=0),-0.5)
return self._S_eff
@property
def S_effIJ(self):
"""Strain power sensitivity. """
if not hasattr(self, '_S_effIJ'):
ii = self.pairs[0]
jj = self.pairs[1]
kk = np.arange(len(self.chiIJ))
num = self.T_IJ[kk] / self.Tspan * self.chiIJ[kk]**2
self._S_effIJ = np.sqrt((self.SnI[ii] * self.SnI[jj])
/ num[:,np.newaxis])
return self._S_effIJ
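# Illustrative sketch (not from the original documentation): a toy three-pulsar
# array with identical observing spans and arbitrary, well-separated sky
# positions, combined into a GWB sensitivity curve.
def _example_gwb_curve(seed=42):
    rng = np.random.RandomState(seed)
    spectra = []
    for _ in range(3):
        toas = np.arange(0, 10 * yr_sec, 14 * 24 * 3600)
        toaerrs = 1e-7 * np.ones_like(toas)
        psr = Pulsar(toas, toaerrs,
                     phi=rng.uniform(0, 2 * np.pi),
                     theta=rng.uniform(0.1, np.pi - 0.1))
        spectra.append(Spectrum(psr, nf=200))
    return GWBSensitivityCurve(spectra)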
class DeterSensitivityCurve(SensitivityCurve):
'''
Parameters
----------
include_corr : bool
Whether to include cross correlations from the GWB as an additional
noise source in full PTA correlation matrix.
(Has little to no effect and adds a lot of computation time.)
A_GWB : float
Value of GWB amplitude for use in cross correlations.
'''
def __init__(self, spectra, pulsar_term=True,
include_corr=False, A_GWB=None):
super().__init__(spectra)
self.T_I = np.array([sp.toas.max()-sp.toas.min() for sp in spectra])
self.pulsar_term = pulsar_term
self.include_corr = include_corr
if include_corr:
self.spectra = spectra
if A_GWB is None:
self.A_GWB = 1e-15
else:
self.A_GWB = A_GWB
Coff = HellingsDownsCoeff(self.phis, self.thetas)
self.ThetaIJ, self.chiIJ, self.pairs, self.chiRSS = Coff
self.T_IJ = np.array([get_TspanIJ(spectra[ii],spectra[jj])
for ii,jj in zip(self.pairs[0],
self.pairs[1])])
self.NcalInvI = np.array([sp.NcalInv for sp in spectra])
def SNR(self, h0):
r'''
Calculate the signal-to-noise ratio of a source given the strain
amplitude. This is based on Equation (79) from Hazboun, et al., 2019
`[1]`_.
.. math::
\rho(\hat{n})=h_0\sqrt{\frac{T_{\rm obs}}{S_{\rm eff}(f_0 ,\hat{k})}}
.. _[1]: https://arxiv.org/abs/1907.04341
'''
return h0 * np.sqrt(self.Tspan / self.S_eff)
@property
def S_eff(self):
"""Strain power sensitivity. """
if not hasattr(self, '_S_eff'):
t_I = self.T_I / self.Tspan
elements = t_I[:,np.newaxis] / self.SnI
sum1 = np.sum(elements, axis=0)
if self.include_corr:
sum = 0
ii = self.pairs[0]
jj = self.pairs[1]
kk = np.arange(len(self.chiIJ))
num = self.T_IJ[kk] / self.Tspan * self.chiIJ[kk]
summand = num[:,np.newaxis] * self.NcalInvIJ
summand *= resid_response(self.freqs)[np.newaxis,:]
sum2 = np.sum(summand, axis=0)
norm = 4./5 if self.pulsar_term else 2./5
self._S_eff = np.power(norm * sum1,-1)
return self._S_eff
@property
def NcalInvIJ(self):
"""
Inverse Noise Weighted Transmission Function that includes
cross-correlation noise from GWB.
"""
if not hasattr(self,'_NcalInvIJ'):
self._NcalInvIJ = get_NcalInvIJ(psrs=self.spectra,
A_GWB=self.A_GWB,
freqs=self.freqs,
full_matrix=True)
return self._NcalInvIJ
def HD(phis,thetas):
return HellingsDownsCoeff(np.array(phis),np.array(thetas))[1][0]
def get_NcalInvIJ(psrs, A_GWB, freqs, full_matrix=False,
return_Gtilde_Ncal=False):
r"""
    Calculate the inverse-noise-weighted transmission function for a set of
    pulsars. This calculates
:math:`\mathcal{N}^{-1}(f,f') , \; \mathcal{N}^{-1}(f)`
in `[1]`_, see Equations (19-20).
.. _[1]: https://arxiv.org/abs/1907.04341
Parameters
----------
psrs : list of hasasia.Pulsar objects
List of hasasia.Pulsar objects to build NcalInvIJ
Returns
-------
inverse-noise-weighted transmission function across two pulsars.
"""
Npsrs = len(psrs)
toas = np.concatenate([p.toas for p in psrs], axis=None)
# make filter
ff = np.tile(freqs, Npsrs)
## CHANGE BACK
# G = sl.block_diag(*[G_matrix(p.designmatrix) for p in psrs])
G = sl.block_diag(*[np.eye(p.toas.size) for p in psrs])
Gtilde = np.zeros((ff.size, G.shape[1]), dtype='complex128')
#N_freqs x N_TOA-N_par
Gtilde = np.dot(np.exp(1j*2*np.pi*ff[:,np.newaxis]*toas),G)
# N_freq x N_TOA-N_par
#CHANGE BACK
# psd = red_noise_powerlaw(A=A_GWB, gamma=13./3, freqs=freqs)
psd = 2*(365.25*24*3600/40)*(1e-6)**2
Ch_blocks = [[corr_from_psdIJ(freqs=freqs, psd=psd,
toasI=pc.toas, toasJ=pr.toas,
fast=True)
for pr in psrs] for pc in psrs]
C_h = | np.block(Ch_blocks) | numpy.block |
"""
Routines for plotting time-dependent scatter data, e.g. vertical transects.
"""
import numpy
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.dates as mdates
import os
from . import utility
__all__ = [
'plot_scatter',
'make_scatter_plot',
'save_scatter_figure',
]
log_scale_vars = [
'specific_turbulent_kinetic_energy_of_sea_water',
'specific_turbulent_kinetic_energy_dissipation_in_sea_water',
'ocean_vertical_heat_diffusivity',
'ocean_vertical_momentum_diffusivity',
]
symmetric_vars = [
'sea_water_x_velocity',
'sea_water_y_velocity',
'upward_sea_water_velocity',
]
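# Minimal sketch of the colour-normalization logic used in plot_scatter below
# (illustrative helper, not part of the original API): variables listed in
# log_scale_vars get a logarithmic norm, variables in symmetric_vars get
# limits symmetric about zero, everything else gets a plain linear norm.
def _example_pick_norm(standard_name, vmin, vmax):
    if standard_name in log_scale_vars:
        return matplotlib.colors.LogNorm(vmin=max(vmin, 1e-12),
                                         vmax=max(vmax, 1e-12))
    if standard_name in symmetric_vars:
        lim = max(abs(vmin), abs(vmax))
        return matplotlib.colors.Normalize(vmin=-lim, vmax=lim)
    return matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)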
def plot_scatter(
cube, ax, x_coordinate, y_coordinate,
x_offset=None, y_offset=None,
title=None,
log_scale=False, symmetric_scale=False,
cmap=None, vmin=None, vmax=None, colorbar=True, norm=None,
show_grid=False, **kwargs):
"""
Plot a single cube in the given axes
"""
fig = ax.figure
_log_scale = log_scale or cube.standard_name in log_scale_vars
_symmetric_scale = symmetric_scale or cube.standard_name in symmetric_vars
if vmin is None:
vmin = cube.data.min()
if vmax is None:
vmax = cube.data.max()
if _log_scale:
vmin = max(vmin, 1e-12)
vmax = max(vmax, 1e-12)
if _symmetric_scale:
abs_lim = max(abs(vmin), abs(vmax))
vmin = -abs_lim
vmax = abs_lim
if _log_scale and norm is None:
norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)
if cube.attributes['dataset_id'][:5] == 'diff:':
# this must be a diff field
cmap = plt.get_cmap('RdBu_r')
val_max = numpy.nanmax(numpy.abs(cube.data))
vmin = -val_max
vmax = val_max
cmap_over = cube.data.max() > vmax
cmap_under = cube.data.min() < vmin
choose_cbar_extend = {
(False, False): 'neither',
(True, False): 'min',
(False, True): 'max',
(True, True): 'both',
}
cbar_extend = choose_cbar_extend[(cmap_under, cmap_over)]
label_alias = kwargs.pop('label_alias', None)
if x_coordinate == 'time':
x_coord = cube.coord('time')
x_coord.convert_units('seconds since 1970-01-01+00')
x = mdates.epoch2num(x_coord.points)
else:
x_coord = cube.coord(x_coordinate)
x = x_coord.points.copy()
y_coord = cube.coord(y_coordinate)
y = y_coord.points.copy()
def compute_coord_shift(x_offset):
coord_name, scalar, offset = x_offset
coord = cube.coord(coord_name)
if coord_name == 'time':
coord.convert_units('seconds since 1970-01-01+00')
coord_array = coord.points
if offset == 'remove-first':
b = -coord_array[0]
elif offset == 'remove-last':
b = -coord_array[-1]
else:
b = offset
x_shift = scalar*(coord_array + b)
return x_shift
if x_offset is not None:
x += compute_coord_shift(x_offset)
if y_offset is not None:
y += compute_coord_shift(y_offset)
values = cube.data
kw = {}
kw.setdefault('alpha', 1.0)
kw.setdefault('edgecolors', 'none')
kw.setdefault('s', 10)
kw.update(kwargs)
color = kw.pop('c', None)
if color is None:
color = values
p = ax.scatter(x, y, c=color, cmap=cmap, vmin=vmin, vmax=vmax, norm=norm,
**kw)
def get_time_locator():
xlim = ax.get_xlim()
range_days = xlim[1] - xlim[0]
if range_days < 15:
major_locator = mdates.DayLocator()
minor_locator = mdates.HourLocator(interval=6)
elif range_days < 30:
major_locator = mdates.DayLocator([1, 5, 10, 15, 20, 25])
minor_locator = mdates.DayLocator()
elif range_days < 80:
major_locator = mdates.DayLocator([1, 10, 20])
minor_locator = mdates.DayLocator()
elif range_days < 370:
major_locator = mdates.MonthLocator()
minor_locator = mdates.DayLocator([1, 5, 10, 15, 20, 25])
else:
major_locator = mdates.AutoDateLocator(
minticks=7, maxticks=12, interval_multiples=False
)
minor_locator = mdates.DayLocator([1, 15])
return major_locator, minor_locator
if x_coordinate == 'time':
major_locator, minor_locator = get_time_locator()
ax.xaxis.set_major_locator(major_locator)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax.xaxis.set_minor_locator(minor_locator)
ax.xaxis.set_tick_params(which='major', length=7)
plt.setp(ax.get_xticklabels(),
rotation=45, ha='right', rotation_mode='anchor')
if title is None:
loc = cube.attributes['location_name']
data_id = cube.attributes['dataset_id']
if label_alias is not None:
data_id = label_alias.get(data_id, data_id)
title = ' '.join([loc, data_id])
ax.set_title(title)
# add x label
if x_coordinate == 'time':
ax.set_xlabel('Date')
else:
x_str = x_coord.standard_name.replace('_', ' ').capitalize()
xlabel = f'{x_str} [{x_coord.units}]'
ax.set_xlabel(xlabel)
# add y label
y_str = y_coord.standard_name.replace('_', ' ').capitalize()
ylabel = f'{y_str} [{y_coord.units}]'
ax.set_ylabel(ylabel)
if colorbar:
# create colorbar
pad = 0.015
width = 0.02
pos = ax.get_position().bounds
cbax_x = pos[0] + pos[2] + pad
cax = fig.add_axes([cbax_x, pos[1], width, pos[3]])
cb_kw = {}
cb_kw.setdefault('extend', cbar_extend)
cb = plt.colorbar(p, cax=cax, **cb_kw)
label = '{:} [{:}]'.format(cube.name().replace('_', ' ').capitalize(),
cube.units)
cb.set_label(label)
def tight_axes(x, pad_fraction, set_func):
xrange = [x.min(), x.max()]
xpad = (xrange[1] - xrange[0]) * pad_fraction
set_func([xrange[0] - xpad, xrange[1] + xpad])
# tight x/ylim
axlim_pad = 0.01
tight_axes(x, axlim_pad, ax.set_xlim)
tight_axes(y, axlim_pad, ax.set_ylim)
return p
def make_scatter_plot(cube_list, *args, **kwargs):
_cube_list = list(cube_list)
if 'vmin' not in kwargs or kwargs['vmin'] is None:
kwargs['vmin'] = numpy.min([numpy.nanmin(c.data) for c in _cube_list])
if 'vmax' not in kwargs or kwargs['vmax'] is None:
kwargs['vmax'] = numpy.max([numpy.nanmax(c.data) for c in _cube_list])
plot_diff = kwargs.pop('plot_diff', False)
if plot_diff:
# compute difference between first 2 cubes
[c.data for c in _cube_list] # force real data (looks like iris bug)
# first cube is the observation
a = _cube_list[0].copy()
b = _cube_list[1].copy()
if not a.is_compatible(b):
b = utility.align_cubes(a, b)
diff = b.copy()
diff.data = b.data - a.data
assert | numpy.abs(diff.data) | numpy.abs |
import pytorch_lightning as pl
from causalode.utils import DATA_DIR
#from causalode.datagen import cancer_simulation
import causalode.utils as utils
from causalode.utils import str2bool
import torch
from torch.utils.data import Dataset, DataLoader, Subset
import os
import argparse
import numpy as np
from scipy.integrate import odeint
import pandas as pd
def fluids_input(t):
return 5*np.exp(-((t-5)/5)**2)
def v_fun(x):
return 0.02*(np.cos(5*x-0.2) * (5-x)**2)**2
def sigmoid(x):
return 1/(1 + np.exp(-x))
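# Illustrative check (not part of the original model code): the fluid bolus
# fluids_input(t) is a Gaussian-shaped infusion that peaks 5 time units after
# treatment onset with amplitude 5; in dx_dt below it is further scaled by
# v_fun of the (transformed) initial pressure.
def _example_treatment_input():
    t = np.linspace(0, 20, 201)
    bolus = fluids_input(t)
    assert np.isclose(t[np.argmax(bolus)], 5.0)   # peak at t = 5
    assert np.isclose(bolus.max(), 5.0)           # peak amplitude of 5
    return bolus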
def dx_dt(state, t, params):
# Parameters:
f_hr_max = params["f_hr_max"]
f_hr_min = params["f_hr_min"]
r_tpr_max = params["r_tpr_max"]
r_tpr_min = params["r_tpr_min"]
ca = params["ca"]
cv = params["cv"]
k_width = params["k_width"]
p_aset = params["p_aset"]
tau = params["tau"]
t_treatment = params["t_treatment"]
# Unknown parameters:
if (params["treatment"]) and (t>=t_treatment):
initp_transform = 0.5+(params["init_pressure"]-0.75)/0.1
A_ = v_fun(initp_transform)
#A_ = 1
i_ext = A_ * fluids_input(t-t_treatment)
else:
i_ext = 0
r_tpr_mod = params["r_tpr_mod"]
sv_mod = params["sv_mod"]
# State variables
p_a = 100. * state[0]
p_v = 10. * state[1]
s = state[2]
sv = 100. * state[3]
# Building f_hr and r_tpr:
f_hr = s * (f_hr_max - f_hr_min) + f_hr_min
r_tpr = s * (r_tpr_max - r_tpr_min) + r_tpr_min - r_tpr_mod
# Building dp_a/dt and dp_v/dt:
dva_dt = -1. * (p_a - p_v) / r_tpr + sv * f_hr
dvv_dt = -1. * dva_dt + i_ext
dpa_dt = dva_dt / (ca * 100.)
dpv_dt = dvv_dt / (cv * 10.)
# Building dS/dt:
ds_dt = (1. / tau) * (1. - 1. / (1 + | np.exp(-1 * k_width * (p_a - p_aset)) | numpy.exp |
''' CONFIDENTIAL
Copyright (c) 2021 <NAME>,
Department of Remote Sensing and Photogrammetry,
Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS)
PERMISSION IS HEREBY LIMITED TO FGI'S INTERNAL USE ONLY. THE CODE
MAY BE RE-LICENSED, SHARED, OR TAKEN INTO OTHER USE ONLY WITH
A WRITTEN CONSENT FROM THE HEAD OF THE DEPARTMENT.
The software is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. In no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
'''
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
try:
import pcl
from pyquaternion import Quaternion
except:
print('cannot import pcl -> change python version')
import matplotlib.cm as cmx
from scipy.spatial import distance_matrix
from scipy.optimize import leastsq
import matplotlib
import matplotlib.animation as animation
import open3d as o3d
import glob
import cv2
import cv2.aruco as aruco
import os
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.text import Annotation
import pickle
from matplotlib.lines import Line2D
import pandas as pd
import random
from scipy.spatial import ConvexHull
from math import sqrt
from math import atan2, cos, sin, pi
from collections import namedtuple
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
from pyquaternion import Quaternion
np.set_printoptions(suppress=True)
def eulerAnglesToRotationMatrix2(theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
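# Quick property check (illustrative sketch): a proper rotation matrix built
# from any Euler angles is orthonormal (R.T R = I) and has determinant +1.
def _example_rotation_properties():
    R = eulerAnglesToRotationMatrix2([np.deg2rad(10), np.deg2rad(-30), np.deg2rad(45)])
    assert np.allclose(np.dot(R.T, R), np.eye(3), atol=1e-12)
    assert np.isclose(np.linalg.det(R), 1.0)
    return R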
Rot_matrix = eulerAnglesToRotationMatrix2([0, 0, np.deg2rad(-90)])
InitLidar = True
InitLidar = False
global globalTrigger
globalTrigger = True
stereoRectify = False# True
#stereoRectify = True
class Annotation3D(Annotation):
def __init__(self, s, xyz, *args, **kwargs):
Annotation.__init__(self, s, xy=(0, 0), *args, **kwargs)
self._verts3d = xyz
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.xy = (xs, ys)
Annotation.draw(self, renderer)
def save_obj(obj, name):
with open('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
print('{}.pkl Object saved'.format(name))
def load_obj(name):
with open('/home/eugeniu/Desktop/my_data/CameraCalibration/data/saved_files/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def showErros(_3DErros, IMageNames):
print('len(_3DErros)->{}'.format(np.shape(_3DErros)))
if len(_3DErros)>1:
_3DErros = np.array(_3DErros).squeeze()
# norm_total = np.array(_3DErros[:,0]).squeeze()
norm_axis = np.array(_3DErros).squeeze() * 1000
index, bar_width = np.arange(len(IMageNames)), 0.24
fig, ax = plt.subplots()
X = ax.bar(index, norm_axis[:, 0], bar_width, label="X")
Y = ax.bar(index + bar_width, norm_axis[:, 1], bar_width, label="Y")
Z = ax.bar(index + bar_width + bar_width, norm_axis[:, 2], bar_width, label="Z")
ax.set_xlabel('images')
ax.set_ylabel('errors in mm')
ax.set_title('3D error')
ax.set_xticks(index + bar_width / 3)
ax.set_xticklabels(IMageNames)
ax.legend()
plt.show()
def triangulation(kp1, kp2, T_1w, T_2w):
"""Triangulation to get 3D points
Args:
kp1 (Nx2): keypoint in view 1 (normalized)
kp2 (Nx2): keypoints in view 2 (normalized)
        T_1w (4x4): pose of view 1 w.r.t world, i.e. T_1w (from w to 1)
T_2w (4x4): pose of view 2 w.r.t world, i.e. T_2w (from w to 2)
Returns:
X (3xN): 3D coordinates of the keypoints w.r.t world coordinate
X1 (3xN): 3D coordinates of the keypoints w.r.t view1 coordinate
X2 (3xN): 3D coordinates of the keypoints w.r.t view2 coordinate
"""
kp1_3D = np.ones((3, kp1.shape[0]))
kp2_3D = np.ones((3, kp2.shape[0]))
kp1_3D[0], kp1_3D[1] = kp1[:, 0].copy(), kp1[:, 1].copy()
kp2_3D[0], kp2_3D[1] = kp2[:, 0].copy(), kp2[:, 1].copy()
X = cv2.triangulatePoints(T_1w[:3], T_2w[:3], kp1_3D[:2], kp2_3D[:2])
X /= X[3]
X1 = T_1w[:3].dot(X)
X2 = T_2w[:3].dot(X)
return X[:3].T, X1.T, X2.T
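# Illustrative sketch with synthetic data (all values made up): two camera
# poses one unit apart along x, a few world points in front of both cameras,
# and their exact normalized projections. triangulation() should recover the
# original world points from the two views.
def _example_triangulation():
    T_1w = np.eye(4)
    T_2w = np.eye(4)
    T_2w[0, 3] = -1.0   # camera 2 sits at world x = +1
    pts_w = np.array([[0.0, 0.0, 5.0],
                      [0.5, -0.2, 4.0],
                      [-0.3, 0.4, 6.0]])
    def project(T, X):
        x = T[:3, :3].dot(X) + T[:3, 3]
        return x[:2] / x[2]   # normalized image coordinates
    kp1 = np.array([project(T_1w, X) for X in pts_w])
    kp2 = np.array([project(T_2w, X) for X in pts_w])
    X_w, X1, X2 = triangulation(kp1, kp2, T_1w, T_2w)
    assert np.allclose(X_w, pts_w, atol=1e-5)
    return X_w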
def triangulate(R1,R2,t1,t2,K1,K2,D1,D2, pts1, pts2):
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
_3d_points = []
for i,point in enumerate(pts1):
point3D = cv2.triangulatePoints(P1, P2, pts1[i], pts2[i]).T
point3D = point3D[:, :3] / point3D[:, 3:4]
_3d_points.append(point3D)
print('Triangulate _3d_points -> {}'.format(np.shape(_3d_points)))
return np.array(_3d_points).squeeze()
def mai(R1,R2,t1,t2,imagePoint1,imagePoint2, K2=None,K1=None, D2=None,D1=None):
# Set up two cameras near each other
if K1 is None:
K = np.array([
[718.856, 0., 607.1928],
[0., 718.856, 185.2157],
[0., 0., 1.],
])
R1 = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
])
R2 = np.array([
[0.99999183, -0.00280829, -0.00290702],
[0.0028008, 0.99999276, -0.00257697],
[0.00291424, 0.00256881, 0.99999245]
])
t1 = np.array([[0.], [0.], [0.]])
t2 = np.array([[-0.02182627], [0.00733316], [0.99973488]])
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
point3D = cv2.triangulatePoints(P1, P2, imagePoint1, imagePoint2).T
point3D = point3D[:, :3] / point3D[:, 3:4]
print('Triangulate point3D -> {}'.format(point3D))
# Reproject back into the two cameras
rvec1, _ = cv2.Rodrigues(R1.T) # Change
rvec2, _ = cv2.Rodrigues(R2.T) # Change
p1, _ = cv2.projectPoints(point3D, rvec1, -t1, K1, distCoeffs=D1) # Change
p2, _ = cv2.projectPoints(point3D, rvec2, -t2, K2, distCoeffs=D2) # Change
    # measure difference between original image point and reprojected image point
reprojection_error1 = np.linalg.norm(imagePoint1 - p1[0, :])
reprojection_error2 = np.linalg.norm(imagePoint2 - p2[0, :])
    print('difference between original image point and reprojected image point')
print(reprojection_error1, reprojection_error2)
return p1,p2
class PointCloud_filter(object):
def __init__(self, file, img_file=None, img_file2=None, debug=True):
self.debug = debug
self.img_file = img_file
self.img_file2 = img_file2
self.name = os.path.basename(file).split('.')[0]
self.file = file
self.useVoxel, self.voxel_size = False, 0.15
self.lowerTemplate, self.showImage = False, True
self.showError = False
self.points_correspondences = None
self.OK = False
        self.useInitialPointCloud = False  # use all points to fit, or only margins
self.chessBoard = False
self.applyICP_directly = False
self.s = .1 # scale
self.plotInit, self.axis_on, self.colour, self.Annotate = False, True, False, False
self.chess, self.corn, self.p1, self.p2, self.p3, self.ICP_finetune_plot = None, None, None, None, None, None
if self.showImage:
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
self.ImageNames = []
self._3DErros = []
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]]).reshape(-1, 3)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * self.s
self.fig = plt.figure(figsize=plt.figaspect(0.5))
self.fig.suptitle('Data collection', fontsize=16)
self.ax = self.fig.add_subplot(1, 2, 1, projection='3d')
#self.ax = self.fig.add_subplot(1, 2, 2, projection='3d')
self.readCameraIntrin()
self.QueryImg = cv2.imread(img_file)
self.ImageNames.append(os.path.basename(img_file))
if self.img_file2: # use stereo case
self.QueryImg2 = cv2.imread(img_file2)
if stereoRectify:
self.QueryImg = cv2.remap(src=self.QueryImg, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
self.QueryImg2 = cv2.remap(src=self.QueryImg2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(self.QueryImg2, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_right and ret_left:
print('Found chessboard in both images')
self.chessBoard = True
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
self.corners2 = corners2_left
cv2.drawChessboardCorners(self.QueryImg, (10, 7), self.corners2, ret_left)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K_left, self.D_left)
imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.QueryImg = self.draw(self.QueryImg, corners=corners2_left, imgpts=imgpts)
self.pixelsPoints = np.asarray(corners2_left).squeeze()
self.pixels_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg2, (10, 7), corners2_right, ret_right)
self.pixels_right = np.asarray(corners2_right).squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
'''print('TRIANGULATE HERE==========================================')
P_1 = np.vstack((np.hstack((np.eye(3), np.zeros(3)[:, np.newaxis])), [0, 0, 0, 1])) # left camera
P_2 = np.vstack((np.hstack((self.R, self.T)), [0, 0, 0, 1])) # right camera
print('P1_{}, P_2{}, x_left:{}, x_right:{}'.format(np.shape(P_1), np.shape(P_2),
np.shape(self.x_left), np.shape(self.x_right)))
X_w, X1, X2 = triangulation(self.x_left,self.x_right,P_1,P_2)
print('X_w:{}, X1:{}, X2:{}, '.format(np.shape(X_w), np.shape(X1), np.shape(X2)))
print(X_w[0])
print(X1[0])
print(X2[0])'''
'''R1 = np.eye(3)
R2 = self.R
t1 = np.array([[0.], [0.], [0.]])
t2 = self.T
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
imagePoint1 = self.x_left[0]
imagePoint2 = self.x_right[0]
print('imagePoint1:{}, imagePoint2:{}'.format(np.shape(imagePoint1), np.shape(imagePoint2)))
print('self.K_left ')
print(self.K_left)
print('self.K_right ')
print(self.K_right)
p1,p2 = test(R1,R2,t1,t2,imagePoint1,imagePoint2,K1=self.K_left,K2=self.K_right, D1=self.D_left,D2=self.D_right)
p1 = np.array(p1).squeeze().astype(int)
p2 = np.array(p2).squeeze().astype(int)
print('p1:{}, p2:{}'.format(np.shape(p1), np.shape(p2)))
#d2 = distance_matrix(X_w, X_w)
#print('d2:{}'.format(d2))
cv2.circle(self.QueryImg, (p1[0],p1[1]), 7, (255, 0, 0), 7)
cv2.circle(self.QueryImg2, (p2[0], p2[1]), 7, (255, 0, 0), 7)
cv2.imshow('QueryImg', cv2.resize(self.QueryImg,None,fx=.5,fy=.5))
cv2.imshow('QueryImg2', cv2.resize(self.QueryImg2, None, fx=.5, fy=.5))
cv2.waitKey(0)
cv2.destroyAllWindows()'''
else:
self.chessBoard = False
self.useVoxel = False
print('No chessboard ')
corners2_left, ids_left, rejectedImgPoints = aruco.detectMarkers(gray_left, self.ARUCO_DICT)
corners2_left, ids_left, _, _ = aruco.refineDetectedMarkers(image=gray_left,
board=self.calibation_board,
detectedCorners=corners2_left,
detectedIds=ids_left,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_left,
distCoeffs=self.D_left)
corners2_right, ids_right, rejectedImgPoints = aruco.detectMarkers(gray_right, self.ARUCO_DICT)
corners2_right, ids_right, _, _ = aruco.refineDetectedMarkers(image=gray_right,
board=self.calibation_board,
detectedCorners=corners2_right,
detectedIds=ids_right,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_right,
distCoeffs=self.D_right)
if np.all(ids_left != None) and np.all(ids_right != None):
print('found charuco board, in both images')
retval_left, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners2_left, ids_left,
self.calibation_board,
self.K_left, self.D_left, None,
None)
retval_right, self.rvecs_right, self.tvecs_right = aruco.estimatePoseBoard(corners2_right,
ids_right,
self.calibation_board,
self.K_right,
self.D_right, None,
None)
if retval_left and retval_right:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
self.tvecs, 0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners2_left, ids_left,
borderColor=(0, 0, 255))
b = 1
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs_right, self.tvecs_right, self.K_right,
self.D_right)
self.corners2_right = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
circle_tvec, 0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('Cannot estimate board position for both charuco')
self.pixelsPoints = self.corners2.squeeze()
self.pixels_left = self.pixelsPoints
self.pixels_right = self.corners2_right.squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
# self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
print('disparity:{}'.format(np.shape(disparity)))
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
else:
print('No any board found!!!')
else:
# Undistortion
h, w = self.QueryImg.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.K, self.D, (w, h), 1, (w, h))
dst = cv2.undistort(self.QueryImg, self.K, self.D, None, newcameramtx)
x, y, w, h = roi
self.QueryImg = dst[y:y + h, x:x + w]
gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
if ret: # found chessboard
print('Found chessboard')
self.chessBoard = True
self.corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg, (10, 7), corners, ret)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K, self.D)
# ret, self.rvecs, self.tvecs, inliers = cv2.solvePnPRansac(self.objp, self.corners2, self.K, self.D)
self.imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K, self.D)
self.QueryImg = self.draw(self.QueryImg, self.corners2, self.imgpts)
self.pixelsPoints = np.asarray(self.corners2).squeeze()
else: # check for charuco
self.chessBoard = False
self.useVoxel = False
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
if np.all(ids != None):
print('found charuco board, ids:{}'.format(np.shape(ids)))
self.chessBoard = False
if len(ids) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
if retval:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, self.tvecs,
0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners, ids,
borderColor=(0, 0, 255))
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, circle_tvec,
0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('No board Found')
self.image_ax = self.fig.add_subplot(1, 2, 2)
#self.image_ax = self.fig.add_subplot(1, 2, 1)
self.image_ax.imshow(self.QueryImg)
self.image_ax.set_axis_off()
self.image_ax.set_xlabel('Y')
self.image_ax.set_ylabel('Z')
else:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111, projection="3d")
self.ax.set_xlabel('X', fontsize=10)
self.ax.set_ylabel('Y', fontsize=10)
self.ax.set_zlabel('Z', fontsize=10)
self.fig.tight_layout()
plt.subplots_adjust(left=.15, bottom=0.2)
#plt.subplots_adjust( bottom=0.2)
self.Rx, self.Ry, self.Rz = [np.deg2rad(-90), 0, np.deg2rad(-40)] if self.chessBoard else [0, 0, 0]
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.savePoints = Button(plt.axes([0.03, 0.45, 0.15, 0.04], ), 'filter points', color='white')
self.savePoints.on_clicked(self.getClosestPoints)
self.resetBtn = Button(plt.axes([0.03, 0.25, 0.15, 0.04], ), 'reset', color='white')
self.resetBtn.on_clicked(self.reset)
self.X_btn = Button(plt.axes([0.03, 0.9, 0.024, 0.04], ), 'X', color='red')
self.X_btn.on_clicked(self.Close)
self.OK_btn = Button(plt.axes([0.03, 0.83, 0.074, 0.04], ), 'OK', color='green')
self.OK_btn.on_clicked(self.OK_btnClick)
self.not_OK_btn = Button(plt.axes([0.105, 0.83, 0.074, 0.04], ), 'not OK', color='red')
self.not_OK_btn.on_clicked(self.not_OK_btnClick)
self.saveCorrespondences = Button(plt.axes([0.03, 0.76, 0.15, 0.04], ), 'Save points', color='white')
self.saveCorrespondences.on_clicked(self.savePointsCorrespondences)
self.fitChessboard = Button(plt.axes([0.03, 0.66, 0.15, 0.04], ), 'auto fit', color='white')
self.fitChessboard.on_clicked(self.auto_fitBoard)
# set up sliders
self.Rx_Slider = Slider(plt.axes([0.25, 0.15, 0.65, 0.03]), 'Rx', -180, 180.0, valinit=np.degrees(self.Rx))
self.Ry_Slider = Slider(plt.axes([0.25, 0.1, 0.65, 0.03]), 'Ry', -180, 180.0, valinit=np.degrees(self.Ry))
self.Rz_Slider = Slider(plt.axes([0.25, 0.05, 0.65, 0.03]), 'Rz', -180, 180.0, valinit=np.degrees(self.Rz))
self.Rx_Slider.on_changed(self.update_R)
self.Ry_Slider.on_changed(self.update_R)
self.Rz_Slider.on_changed(self.update_R)
self.check = CheckButtons(plt.axes([0.03, 0.3, 0.15, 0.12]), ('Axes', 'Black', 'Annotate'),
(self.axis_on, self.colour, self.Annotate))
self.check.on_clicked(self.func_CheckButtons)
# set up translation buttons
self.step = .1 # m
self.trigger = True
self.Tx_btn_plus = Button(plt.axes([0.05, 0.15, 0.04, 0.045]), '+Tx', color='white')
self.Tx_btn_plus.on_clicked(self.Tx_plus)
self.Tx_btn_minus = Button(plt.axes([0.12, 0.15, 0.04, 0.045]), '-Tx', color='white')
self.Tx_btn_minus.on_clicked(self.Tx_minus)
self.Ty_btn_plus = Button(plt.axes([0.05, 0.1, 0.04, 0.045]), '+Ty', color='white')
self.Ty_btn_plus.on_clicked(self.Ty_plus)
self.Ty_btn_minus = Button(plt.axes([0.12, 0.1, 0.04, 0.045]), '-Ty', color='white')
self.Ty_btn_minus.on_clicked(self.Ty_minus)
self.Tz_btn_plus = Button(plt.axes([0.05, 0.05, 0.04, 0.045]), '+Tz', color='white')
self.Tz_btn_plus.on_clicked(self.Tz_plus)
self.Tz_btn_minus = Button(plt.axes([0.12, 0.05, 0.04, 0.045]), '-Tz', color='white')
self.Tz_btn_minus.on_clicked(self.Tz_minus)
self.Tx_flip = Button(plt.axes([0.17, 0.15, 0.04, 0.045]), 'FlipX', color='white')
self.Tx_flip.on_clicked(self.flipX)
self.Ty_flip = Button(plt.axes([0.17, 0.1, 0.04, 0.045]), 'FlipY', color='white')
self.Ty_flip.on_clicked(self.flipY)
self.Tz_flip = Button(plt.axes([0.17, 0.05, 0.04, 0.045]), 'FlipZ', color='white')
self.Tz_flip.on_clicked(self.flipZ)
self.radio = RadioButtons(plt.axes([0.03, 0.5, 0.15, 0.15], ), ('Final', 'Init'), active=0)
self.radio.on_clicked(self.colorfunc)
self.tag = None
self.circle_center = None
self.errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d.",
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible.",
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
self.legend_elements = [
Line2D([0], [0], marker='o', color='w', label='Original pointcloud', markerfacecolor='g', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Corners', markerfacecolor='k', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Margins', markerfacecolor='r', markersize=4),
]
def setUp(self):
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.board()
self.ax.legend(handles=self.legend_elements, loc='best')
if self.showImage:
self.getDepth_Inside_Outside()
self.fitNewPlan()
def auto_fitBoard(self, args):
# estimate 3D-R and 3D-t between chess and PointCloud
# Inital guess of the transformation
x0 = np.array([np.degrees(self.Rx), np.degrees(self.Ry), np.degrees(self.Rz), self.Tx, self.Ty, self.Tz])
report = {"error": [], "template": []}
def f_min(x):
self.Rx, self.Ry, self.Rz = np.deg2rad(x[0]), np.deg2rad(x[1]), np.deg2rad(x[2])
self.Tx, self.Ty, self.Tz = x[3], x[4], x[5]
template = self.board(plot=False)
if self.useInitialPointCloud:
dist_mat = distance_matrix(template, self.point_cloud)
else:
dist_mat = distance_matrix(template, self.corners_)
err_func = dist_mat.sum(axis=1) # N x 1
# err_func = dist_mat.sum(axis=0) # N x 1
if self.debug:
print('errors = {}, dist_mat:{}, err_func:{}'.format(round(np.sum(err_func), 2), np.shape(dist_mat),
np.shape(err_func)))
report["error"].append(np.sum(err_func))
report["template"].append(template)
return err_func
maxIters = 700
sol, status = leastsq(f_min, x0, ftol=1.49012e-07, xtol=1.49012e-07, maxfev=maxIters)
print('sol:{}, status:{}'.format(sol, status))
print(self.errors[status])
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.ICP_finetune_plot:
self.ICP_finetune_plot.remove()
self.lowerTemplate = False
self.board()
point_cloud = np.asarray(self.point_cloud, dtype=np.float32)
template = np.asarray(report["template"][0], dtype=np.float32) if self.applyICP_directly else np.asarray(
self.template_cloud, dtype=np.float32)
converged, self.transf, estimate, fitness = self.ICP_finetune(template, point_cloud)
# converged, self.transf, estimate, fitness = self.ICP_finetune(point_cloud,template)
self.estimate = np.array(estimate)
if self.chessBoard:
self.ICP_finetune_plot = self.ax.scatter(self.estimate[:, 0], self.estimate[:, 1], self.estimate[:, 2],
c='k', marker='o', alpha=0.8, s=4)
else:
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = self.estimate[idx, :]
self.ICP_finetune_plot = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2],
c='k', marker='o', alpha=0.8, s=4)
self.trigger = False
# set values of sol to Sliders
self.Rx_Slider.set_val(np.rad2deg(self.Rx))
self.Ry_Slider.set_val(np.rad2deg(self.Ry))
self.Rz_Slider.set_val(np.rad2deg(self.Rz))
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.trigger = True
self.board()
self.AnnotateEdges()
self.fig.canvas.draw_idle()
if self.showError:
print('min error:{} , at index:{}'.format(np.min(report["error"]), np.argmin(report["error"])))
rep = plt.figure(figsize=(15, 8))
plt.xlim(0, len(report["error"]) + 1)
plt.xlabel('Iteration')
plt.ylabel('RMSE')
plt.yticks(color='w')
plt.plot(np.arange(len(report["error"])) + 1, report["error"])
print('Start animation gif')
def update_graph(num):
data = np.asarray(report["template"][num])
graph._offsets3d = (data[:, 0], data[:, 1], data[:, 2])
title.set_text('Iteration {}'.format(num))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
title = ax.set_title('3D Test')
data = report["template"][0]
graph = ax.scatter(data[:, 0], data[:, 1], data[:, 2])
ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2])
ani = animation.FuncAnimation(fig, update_graph, 101, interval=2, blit=False, repeat=False)
ani.save('myAnimation.gif', writer='imagemagick', fps=30)
print('Animation done')
plt.show()
def flipX(self, event):
self.Rx_Slider.set_val(np.rad2deg(self.Rx + np.pi))
self.update_R(0)
def flipY(self, event):
self.Ry_Slider.set_val(np.rad2deg(self.Ry + np.pi))
self.update_R(0)
def flipZ(self, event):
self.Rz_Slider.set_val(np.rad2deg(self.Rz + np.pi))
self.update_R(0)
def update_R(self, val):
if self.trigger:
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.Rx = np.deg2rad(self.Rx_Slider.val)
self.Ry = np.deg2rad(self.Ry_Slider.val)
self.Rz = np.deg2rad(self.Rz_Slider.val)
self.board()
self.fig.canvas.draw_idle()
def board(self, plot=True, given_origin=None, angle=None):
self.board_origin = [self.Tx, self.Ty, self.Tz] if given_origin is None else given_origin
if self.chessBoard:
self.nCols, self.nRows, org = 7 + 2, 10 + 2, np.asarray(self.board_origin)
#org[0] -= self.nCols / 2
#org[1] -= self.nRows / 2
org[0] -= 4
org[1] -= 6
#org = np.zeros(3)
if self.lowerTemplate:
nrCols, nrRows = 2, 3
else:
nrCols, nrRows = self.nCols, self.nRows
#nrCols, nrRows = self.nCols+1, self.nRows+1 #remove later
print('org:{}, self.nCols - >{}, nrCols:{}'.format(org,self.nCols,nrCols))
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,num=nrRows)
X, Y = np.linspace(org[0], org[0] + self.nCols-1, num=nrCols), np.linspace(org[1], org[1] + self.nRows-1,
num=nrRows)
print('X:{}'.format(X))
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
colors, colortuple = np.empty(X.shape, dtype=str), ('k', 'w')
for y in range(nrCols):
for x in range(nrRows):
colors[x, y] = colortuple[(x + y) % len(colortuple)]
colors[0, 0] = 'r'
alpha = 0.65
else:
self.nCols, self.nRows, org = 10, 10, np.asarray(self.board_origin)
org[0] -= self.nCols / 2
org[1] -= self.nRows / 2
# nrCols, nrRows = 4,4z
nrCols, nrRows = self.nCols, self.nRows
# nrCols, nrRows = 20, 20
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,
num=nrRows)
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
alpha = 0.25
angles = np.array([self.Rx, self.Ry, self.Rz]) if angle is None else np.array(angle)
Rot_matrix = self.eulerAnglesToRotationMatrix(angles)
X, Y, Z = X * self.s, Y * self.s, Z * self.s
corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0))
init = corners.reshape(-1, 3)
print('corners-----------------------------------------------------')
#print(init)
print('corners -> {}'.format(np.shape(init)))
dist_Lidar = distance_matrix(init, init)
print('dist_Lidar corners---------------------------------------------------------')
print(dist_Lidar[0, :11])
translation = np.mean(init, axis=0) # get the mean point
corners = np.subtract(corners, translation) # substract it from all the other points
X, Y, Z = np.transpose(np.add(np.dot(corners, Rot_matrix), translation), (2, 0, 1))
# corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0)).reshape(-1, 3)
corners = np.transpose(np.array([X, Y, Z]), (2, 1, 0)).reshape(-1, 3)
if plot:
if self.chessBoard:
self.chess = self.ax.plot_surface(X, Y, Z, facecolors=colors, linewidth=0.2, cmap='gray', alpha=alpha)
else:
self.chess = self.ax.plot_surface(X, Y, Z, linewidth=0.2, cmap='gray', alpha=alpha)
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = corners[idx, :]
self.corn = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2], c='tab:blue',
marker='o', s=5)
self.template_cloud = corners
return np.array(corners)
def getPointCoud(self, colorsMap='jet', skip=1, useRing = True):
# X, Y, Z, intensity, ring
if useRing:
originalCloud = np.array(np.load(self.file, mmap_mode='r'))[:,:5]
if InitLidar:
xyz = originalCloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
originalCloud[:, 0:3] = new_xyz
#mean_x = np.mean(originalCloud[:, 0])
#originalCloud[:, 0] = mean_x
df = pd.DataFrame(data=originalCloud, columns=["X", "Y", "Z","intens","ring"])
gp = df.groupby('ring')
keys = gp.groups.keys()
#groups = gp.groups
coolPoints, circlePoints = [],[]
for i in keys:
                line = np.array(gp.get_group(i), dtype=float)
                first, last = np.array(line[0], dtype=float)[:3], np.array(line[-1], dtype=float)[:3]
coolPoints.append(first)
coolPoints.append(last)
if self.chessBoard == False:
if len(line) > 50:
l = line[:,:3]
for i in range(2,len(l)-2,1):
d = np.linalg.norm(l[i]-l[i+1])
if d > 0.08: #half of the circle
circlePoints.append(l[i])
circlePoints.append(l[i+1])
self.coolPoints = np.array(coolPoints).squeeze()
self.ax.scatter(*self.coolPoints.T, color='r', marker='o', alpha=1, s=2)
print('coolPoints:{}, circlePoints:{}'.format(np.shape(self.coolPoints), np.shape(circlePoints)))
circlePoints = np.array(circlePoints)
if len(circlePoints)>0:
self.ax.scatter(*circlePoints.T, color='r', marker='o', alpha=1, s=5)
self.fitCircle(circlePoints)
#self.point_cloud = np.array(self.coolPoints, dtype=np.float32)
self.point_cloud = np.array(np.load(self.file, mmap_mode='r')[::skip, :3], dtype=np.float32)
if InitLidar:
xyz = self.point_cloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
self.point_cloud[:, 0:3] = new_xyz
# center the point_cloud
#mean_x = np.mean(self.point_cloud[:, 0])
#self.point_cloud[:, 0] = mean_x
self.point_cloud_mean = np.mean(self.point_cloud, axis=0)
self.Tx, self.Ty, self.Tz = self.point_cloud_mean
# self.point_cloud = self.point_cloud - self.point_cloud_mean
self.point_cloud_colors = np.array(np.load(self.file, mmap_mode='r'))[::skip, 3]
if self.plotInit:
cm = plt.get_cmap(colorsMap)
cNorm = matplotlib.colors.Normalize(vmin=min(self.point_cloud_colors), vmax=max(self.point_cloud_colors))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
self.p1 = self.ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2],
color=scalarMap.to_rgba(self.point_cloud_colors), s=0.2)
else:
self.p = pcl.PointCloud(self.point_cloud)
inlier, outliner, coefficients = self.do_ransac_plane_segmentation(self.p, pcl.SACMODEL_PLANE,
pcl.SAC_RANSAC, 0.01)
#self.planeEquation(coef=np.array(coefficients).squeeze())
self.point_cloud_init = self.point_cloud.copy()
if self.useVoxel:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(self.point_cloud)
self.point_cloud = np.array(pcd.voxel_down_sample(voxel_size=self.voxel_size).points)
# self.p1 = self.ax.scatter(outliner[:, 0], outliner[:, 1], outliner[:, 2], c='y', s=0.2)
self.p2 = self.ax.scatter(inlier[:, 0], inlier[:, 1], inlier[:, 2], c='g', s=0.2)
w, v = self.PCA(inlier)
point = np.mean(inlier, axis=0)
if self.chessBoard == False and self.circle_center:
#point[1:] = self.circle_center
point[[0,2]]= self.circle_center
w *= 2
if self.chessBoard==False and self.circle_center:
p = Circle(self.circle_center, self.circle_radius, alpha = .3, color='tab:blue')
self.ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=point[1], zdir="y")
self.p3 = self.ax.quiver([point[0]], [point[1]], [point[2]], [v[0, :] * np.sqrt(w[0])],
[v[1, :] * np.sqrt(w[0])],
[v[2, :] * np.sqrt(w[0])], linewidths=(1.8,))
def axisEqual3D(self, centers=None):
extents = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(extents, axis=1) if centers is None else centers
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(self.ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def planeEquation(self, coef):
a, b, c, d = coef
mean = np.mean(self.point_cloud, axis=0)
normal = [a, b, c]
d2 = -mean.dot(normal)
# print('d2:{}'.format(d2))
# print('mean:{}'.format(mean))
# print('The equation is {0}x + {1}y + {2}z = {3}'.format(a, b, c, d))
# plot the normal vector
startX, startY, startZ = mean[0], mean[1], mean[2]
startZ = (-normal[0] * startX - normal[1] * startY - d) * 1. / normal[2]
self.ax.quiver([startX], [startY], [startZ], [normal[0]], [normal[1]], [normal[2]], linewidths=(3,),edgecolor="red")
def PCA(self, data, correlation=False, sort=True):
# data = nx3
mean = np.mean(data, axis=0)
data_adjust = data - mean
#: the data is transposed due to np.cov/corrcoef syntax
if correlation:
matrix = np.corrcoef(data_adjust.T)
else:
matrix = np.cov(data_adjust.T)
eigenvalues, eigenvectors = np.linalg.eig(matrix)
if sort:
#: sort eigenvalues and eigenvectors
sort = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[sort]
eigenvectors = eigenvectors[:, sort]
return eigenvalues, eigenvectors
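# Illustrative note (added, not part of the original pipeline): for a roughly planar
# point set, the eigenvector belonging to the smallest eigenvalue returned by PCA()
# is an estimate of the plane normal (up to sign). Mirroring the call in getPointCoud:
# w, v = self.PCA(inlier)
# normal = v[:, np.argmin(w)] # unit normal of the fitted plane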
def eulerAnglesToRotationMatrix(self, theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
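# Sanity-check sketch (illustrative only): the product R = Rz . Ry . Rx above reduces
# to the 3x3 identity for theta = [0, 0, 0], which can be used as a quick self-test:
# R = self.eulerAnglesToRotationMatrix(np.zeros(3))
# assert np.allclose(R, np.eye(3))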
def do_ransac_plane_segmentation(self, pcl_data, pcl_sac_model_plane, pcl_sac_ransac, max_distance):
"""
Fit a plane to the point cloud with RANSAC and split it into inliers and outliers
:param pcl_data: point cloud data (pcl.PointCloud)
:param pcl_sac_model_plane: model type used to fit a plane (pcl.SACMODEL_PLANE)
:param pcl_sac_ransac: RANdom SAmple Consensus method (pcl.SAC_RANSAC)
:param max_distance: max distance for a point to be considered as fitting the model
:return: (inlier cloud, outlier cloud, plane coefficients)
"""
seg = pcl_data.make_segmenter()
seg.set_model_type(pcl_sac_model_plane)
seg.set_method_type(pcl_sac_ransac)
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
inlier_object = pcl_data.extract(inliers, negative=False)
outlier_object = pcl_data.extract(inliers, negative=True)
if len(inliers) <= 1:
outlier_object = [0, 0, 0]
inlier_object, outlier_object = np.array(inlier_object), np.array(outlier_object)
return inlier_object, outlier_object, coefficients
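# Hedged usage sketch (this mirrors the call already made in getPointCoud, it is not
# an additional code path; 'points' is any Nx3 float32 array):
# p = pcl.PointCloud(np.asarray(points, dtype=np.float32))
# inlier, outlier, coeff = self.do_ransac_plane_segmentation(p, pcl.SACMODEL_PLANE,
#                                                            pcl.SAC_RANSAC, 0.01)
# 'coeff' holds [a, b, c, d] of the fitted plane ax + by + cz + d = 0.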
def func_CheckButtons(self, label):
if label == 'Axes':
if self.axis_on:
self.ax.set_axis_off()
self.axis_on = False
else:
self.ax.set_axis_on()
self.axis_on = True
elif label == 'Black':
if self.colour:
self.colour = False
self.ax.set_facecolor((1, 1, 1))
else:
self.colour = True
self.ax.set_facecolor((0, 0, 0))
elif label == 'Annotate':
self.Annotate = not self.Annotate
self.AnnotateEdges()
self.fig.canvas.draw_idle()
def ICP_finetune(self, points_in, points_out):
cloud_in = pcl.PointCloud()
cloud_out = pcl.PointCloud()
cloud_in.from_array(points_in)
cloud_out.from_array(points_out)
# icp = cloud_in.make_IterativeClosestPoint()
icp = cloud_out.make_IterativeClosestPoint()
converged, transf, estimate, fitness = icp.icp(cloud_in, cloud_out)
print('fitness:{}, converged:{}, transf:{}, estimate:{}'.format(fitness, converged, np.shape(transf),
np.shape(estimate)))
return converged, transf, estimate, fitness
def colorfunc(self, label):
if label == 'Init':
self.plotInit = True
else:
self.plotInit = False
self.reset(0)
def OK_btnClick(self, args):
self.OK = True
plt.close()
def not_OK_btnClick(self, args):
self.OK = False
plt.close()
def Close(self, args):
global globalTrigger
globalTrigger = False
plt.close()
def reset(self, args):
self.ax.cla()
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.Rx, self.Ry, self.Rz = 0, 0, 0
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.board()
self.fig.canvas.draw_idle()
def getClosestPoints(self, arg):
dist_mat = distance_matrix(self.template_cloud, self.point_cloud_init)
self.neighbours = np.argsort(dist_mat, axis=1)[:, 0]
self.finaPoints = np.asarray(self.point_cloud_init[self.neighbours, :]).squeeze()
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.p3:
self.p3.remove()
if self.p2:
self.p2.remove()
if self.p1:
self.p1.remove()
self.scatter_finalPoints = self.ax.scatter(self.finaPoints[:, 0], self.finaPoints[:, 1], self.finaPoints[:, 2],
c='k', marker='x', s=1)
self.corn = self.ax.scatter(self.template_cloud[:, 0], self.template_cloud[:, 1], self.template_cloud[:, 2],
c='blue', marker='o', s=5)
self.fig.canvas.draw_idle()
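# Note on the nearest-neighbour step above (illustrative): distance_matrix returns an
# MxN matrix of Euclidean distances between the M template corners and the N lidar
# points; np.argsort(..., axis=1)[:, 0] then picks, for every template corner, the
# index of its closest lidar point. A tiny example of the same pattern:
# d = distance_matrix(np.array([[0., 0.]]), np.array([[1., 0.], [0.2, 0.]]))
# nearest = np.argsort(d, axis=1)[:, 0]  # -> [1], i.e. the point at (0.2, 0)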
def Tz_plus(self, event):
self.Tz += self.step
self.update_R(0)
def Tz_minus(self, event):
self.Tz -= self.step
self.update_R(0)
def Ty_plus(self, event):
self.Ty += self.step
self.update_R(0)
def Ty_minus(self, event):
self.Ty -= self.step
self.update_R(0)
def Tx_plus(self, event):
self.Tx += self.step
self.update_R(0)
def Tx_minus(self, event):
self.Tx -= self.step
self.update_R(0)
def readCameraIntrin(self):
name = 'inside'
name = 'outside'
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_left = self.camera_model['K_left']
self.K_right = self.camera_model['K_right']
self.D_left = self.camera_model['D_left']
self.D_right = self.camera_model['D_right']
# self.K_left = self.camera_model['K_right']
# self.K_right = self.camera_model['K_left']
# self.D_left = self.camera_model['D_right']
# self.D_right = self.camera_model['D_left']
# print('K_left')
# print(self.K_left)
# print('K_right')
# print(self.K_right)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.T = np.array([-0.98, 0., 0.12])[:, np.newaxis]
#self.T = np.array([-.75, 0., 0.])[:, np.newaxis]
#print('self T after {}'.format(np.shape(self.T)))
#angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
#self.R = euler_matrix(angles)
#Q = self.camera_model_rectify['Q']
#roi_left, roi_right = self.camera_model_rectify['roi_left'], self.camera_model_rectify['roi_right']
self.leftMapX, self.leftMapY = self.camera_model_rectify['leftMapX'], self.camera_model_rectify['leftMapY']
self.rightMapX, self.rightMapY = self.camera_model_rectify['rightMapX'], self.camera_model_rectify['rightMapY']
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
self.K = self.K_right
self.D = self.D_right
try:
N = 5
aruco_dict = aruco.custom_dictionary(0, N, 1)
aruco_dict.bytesList = np.empty(shape=(4, N - 1, N - 1), dtype=np.uint8)
A = np.array([[0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[0] = aruco.Dictionary_getByteListFromBits(A)
R = np.array([[1, 1, 1, 1, 0], [1, 0, 0, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 1]],
dtype=np.uint8)
aruco_dict.bytesList[1] = aruco.Dictionary_getByteListFromBits(R)
V = np.array([[1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0]],
dtype=np.uint8)
O = np.array([[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[2] = aruco.Dictionary_getByteListFromBits(O)
aruco_dict.bytesList[3] = aruco.Dictionary_getByteListFromBits(V)
self.ARUCO_DICT = aruco_dict
self.calibation_board = aruco.GridBoard_create(
markersX=2, markersY=2,
markerLength=0.126, markerSeparation=0.74,
dictionary=self.ARUCO_DICT)
except:
print('Install Aruco')
def draw(self, img, corners, imgpts):
corner = tuple(corners[0].ravel())
cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
return img
def annotate3D(self, ax, s, *args, **kwargs):
self.tag = Annotation3D(s, *args, **kwargs)
ax.add_artist(self.tag)
def AnnotateEdges(self, giveAX=None, givenPoints=None):
if self.Annotate:
# add vertices annotation.
if giveAX is None:
if self.lowerTemplate or self.chessBoard == False:
if self.chessBoard == False:
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
self.templatePoints = [pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]
self.templatePoints = np.array(self.templatePoints).reshape(-1, 3)
cornersToPLot = self.estimate[idx, :]
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=12, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(self.template_cloud):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
try:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
except:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols+1, self.nRows+1, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
# templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nRows,self.nCols, 3)[1:self.nRows-1,1:self.nCols-1,:]
self.templatePoints = np.array(templatePoints).reshape(-1, 3)
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(givenPoints):
self.annotate3D(giveAX, s=str(j), xyz=xyz_, fontsize=10, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
if self.showImage:
# annotate image
points = np.asarray(self.corners2).squeeze()
font, lineType = cv2.FONT_HERSHEY_SIMPLEX, 2 if self.chessBoard else 10
for i, point in enumerate(points):
point = tuple(point.ravel())
cv2.putText(self.QueryImg, '{}'.format(i), point, font, 1 if self.chessBoard else 3, (0, 0, 0)
if self.chessBoard else (255, 0, 0), lineType)
self.image_ax.imshow(self.QueryImg)
def getCamera_XYZ_Stereo(self):
#cam_rot, jac = cv2.Rodrigues(self.rvecs)
#mR = np.matrix(cam_rot)
#mT = np.matrix(self.tvecs)
#cam_trans = -mR * mT
_3DPoints = []
for i, pixel in enumerate(self.x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = self.depth[i]
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - self.fxypxy[2]) / self.fxypxy[0]
pt[1] = pt[2] * (pt[1] - self.fxypxy[3]) / self.fxypxy[1]
# pt = pt.dot(cam_rot.T) + self.tvecs
_3DPoints.append(pt)
print('_3DPoints {}'.format(np.shape(_3DPoints)))
print('tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ_Stereo mean {}'.format(np.mean(_3DPoints, axis=0)))
_3DPoints = np.array(_3DPoints).squeeze()
print('from disparity getCamera_XYZ_Stereo ')
d = distance_matrix(_3DPoints,_3DPoints)
print(d)
return _3DPoints
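# The loop above back-projects each pixel with the standard pinhole relations, using
# fx, fy, cx, cy from self.fxypxy:
#   X = Z * (u - cx) / fx,   Y = Z * (v - cy) / fy,   Z = depth from disparity.
# Purely illustrative numbers: with fx = 1400 px, cx = 968 px, u = 1200 px and Z = 5 m
# this gives X = 5 * (1200 - 968) / 1400 ~ 0.83 m.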
def getCamera_XYZ(self):
R_mtx, jac = cv2.Rodrigues(self.rvecs)
inv_R_mtx = np.linalg.inv(R_mtx)
inv_K = np.linalg.inv(self.K)
def compute_XYZ(u, v): # from 2D pixels to 3D world
uv_ = np.array([[u, v, 1]], dtype=np.float32).T
suv_ = uv_
xyz_ = inv_K.dot(suv_) - self.tvecs
XYZ = inv_R_mtx.dot(xyz_)
pred = XYZ.T[0]
return pred
Camera_XYZ = []
for i, point in enumerate(self.pixelsPoints):
xyz = compute_XYZ(u=point[0], v=point[1])
# print 'xyz:{}'.format(xyz)
Camera_XYZ.append(xyz)
Camera_XYZ = np.array(Camera_XYZ)
print('init tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ mean {}'.format(np.mean(Camera_XYZ, axis=0)))
if self.img_file2 is None:
for i, point in enumerate(Camera_XYZ):
imgpts, jac = cv2.projectPoints(point, self.rvecs, self.tvecs, self.K, self.D)
imgpts = np.asarray(imgpts).squeeze()
cv2.circle(self.QueryImg, (int(imgpts[0]), int(imgpts[1])), 7, (255, 0, 0), 7)
self.image_ax.imshow(self.QueryImg)
return Camera_XYZ
def getImagePixels(self):
img = cv2.imread(self.img_file) #left image
img2 = cv2.imread(self.img_file2) # left image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
pixelsPoints,pixelsPoints2, _3DreconstructedBoard = [],[],[]
if self.chessBoard:
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
ret2, corners2 = cv2.findChessboardCorners(gray2, (10, 7), None)
if ret and ret2: # found chessboard
print('Found chessboard')
corners_2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
corners2_2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), self.criteria)
pixelsPoints = np.asarray(corners_2).squeeze()
pixelsPoints2 = np.asarray(corners2_2).squeeze()
cv2.drawChessboardCorners(img, (10, 7), corners_2, ret)
cv2.drawChessboardCorners(img2, (10, 7), corners2_2, ret2)
# Find the rotation and translation vectors.
success, rvecs, tvecs, inliers = cv2.solvePnPRansac(self.objp, corners_2, self.K, self.D)
rvecs, _ = cv2.Rodrigues(rvecs)
_3Dpoints = self.objp
# project 3D points to image plane
_2Dpoints, jac = cv2.projectPoints(_3Dpoints, rvecs, tvecs, self.K, self.D)
_2Dpoints = np.array(_2Dpoints, dtype=np.float32).squeeze()
print('_2Dpoints -> {}'.format(np.shape(_2Dpoints)))
for i in range(len(_2Dpoints)):
cv2.circle(img, tuple(_2Dpoints[i]), 5, (0, 255, 0), 3)
_3Dpoints = rvecs.dot(_3Dpoints.T) + tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat[0, :11])
_3DreconstructedBoard = _3Dpoints
else:
return None,None
else:
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
corners2, ids2, rejectedImgPoints2 = aruco.detectMarkers(gray2, self.ARUCO_DICT)
corners2, ids2, rejectedImgPoints2, recoveredIds2 = aruco.refineDetectedMarkers(
image=gray2, board=self.calibation_board, detectedCorners=corners2, detectedIds=ids2,
rejectedCorners=rejectedImgPoints2, cameraMatrix=self.K, distCoeffs=self.D)
if np.all(ids != None) and np.all(ids2 != None):
print('found charuco board, ids:{}'.format(np.shape(ids)))
if len(ids) and len(ids2) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
retval2, self.rvecs2, self.tvecs2 = aruco.estimatePoseBoard(corners2, ids2,
self.calibation_board, self.K,
self.D, None, None)
img = aruco.drawDetectedMarkers(img, corners, ids,borderColor=(0, 0, 255))
img2 = aruco.drawDetectedMarkers(img2, corners2, ids2, borderColor=(0, 0, 255))
if retval and retval2:
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
self.dst2, jacobian = cv2.Rodrigues(self.rvecs2)
#self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0],[.5,.5,0]])
_3Dpoints = self.dst.T.dot(np.array(self.pts).squeeze().T) + self.tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat)
_3DreconstructedBoard = _3Dpoints
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img = cv2.line(img, top_right, bot_right, (0, 255, 0), 4)
img = cv2.line(img, bot_right, bot_left, (0, 255, 0), 4)
img = cv2.line(img, bot_left, top_left, (0, 255, 0), 4)
img = cv2.line(img, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img, tuple(corners2[-1]), 5, (0, 255, 0), 3)
cv2.circle(img, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints = np.asarray(corners2).squeeze()
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs2, self.tvecs2, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img2 = cv2.line(img2, top_right, bot_right, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_right, bot_left, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_left, top_left, (0, 255, 0), 4)
img2 = cv2.line(img2, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img2, tuple(corners2[-1]), 5, (0, 255, 0), 3)
#cv2.circle(img2, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints2 = np.asarray(corners2).squeeze()
else:
return None,None
else:
return None,None
else:
return None,None
scale = .4
_horizontal = np.hstack(
(cv2.resize(img, None, fx=scale, fy=scale), cv2.resize(img2, None, fx=scale, fy=scale)))
cv2.imshow('_horizontal', _horizontal)
cv2.waitKey(0)
cv2.destroyAllWindows()
return pixelsPoints,pixelsPoints2, _3DreconstructedBoard
def savePointsCorrespondences(self, args):
display = True
fig = plt.figure(figsize=plt.figaspect(1))
ax = plt.axes(projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if self.chessBoard:
legend_elements = [
Line2D([0], [0], marker='o', label='board template', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='green', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='k', markersize=6),
Line2D([0], [0], marker='o', label='Camera_XYZ', markerfacecolor='red', markersize=6),
]
board_template = self.template_cloud
board_template_ICP_finetuned = self.estimate
closest_lidar_points = self.finaPoints
try:
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
except:
print('Second-----------------------------')
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
icp_finetuned_inside = np.array(icp_finetuned_inside).reshape(-1, 3)
board_template_inside = np.array(board_template_inside).reshape(-1, 3)
print('board_template_inside-----------------------------------------------------')
print(board_template_inside)
print('board_template_inside -> {}'.format(np.shape(board_template_inside)))
dist_Lidar = distance_matrix(board_template_inside, board_template_inside)
print('dist_Lidar---------------------------------------------------------')
print(dist_Lidar[0, :11])
closest_lidar_points_inside = np.array(closest_lidar_points_inside).reshape(-1, 3)
Camera_XYZ = self.getCamera_XYZ()
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
display = True
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('icp_finetuned_inside:{}'.format(np.shape(icp_finetuned_inside)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('closest_lidar_points_inside:{}'.format(np.shape(closest_lidar_points_inside)))
print('Camera_XYZ:{}'.format(np.shape(Camera_XYZ)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
#dist = distance_matrix(Camera_XYZ_Stereo, Camera_XYZ_Stereo)
#print('distance matrix Camera_XYZ_Stereo:{}'.format(dist))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*icp_finetuned_inside.T, color='g', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*closest_lidar_points_inside.T, color='k', marker='x', alpha=1, s=20)
ax.scatter(*Camera_XYZ.T, color='k', marker='x', alpha=1, s=30)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=1, s=3)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ_Stereo, axis=0) if self.img_file2 is not None else np.mean(board_template,axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if len(self.pixelsPointsLeft)<=0:
print('Cannot get pixels points !!! ')
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('icp_finetuned_inside', icp_finetuned_inside),
('closest_lidar_points', closest_lidar_points),
('closest_lidar_points_inside', closest_lidar_points_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ', Camera_XYZ)])
# save_obj(self.points_correspondences, self.name)
else:
legend_elements = [
Line2D([0], [0], marker='o', label='board template all', markerfacecolor='b', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='red', markersize=6),
Line2D([0], [0], marker='o', label='board template inside', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='red', markersize=6),
]
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
board_template = np.array([pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1,
3)
board_template = board_template
pts = np.asarray(self.estimate.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.estimate[idx], axis=0)
board_template_ICP_finetuned = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
board_template_inside = self.templatePoints
pts = np.asarray(self.finaPoints.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.finaPoints[idx], axis=0)
closest_lidar_points = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=.8, s=20)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ, axis=0) if self.img_file2 is not None else np.mean(board_template, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
_3DreconstructedBoard = np.array(_3DreconstructedBoard).squeeze()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if len(self.pixelsPointsLeft) <= 0:
print('Cannot get pixels points !!! ')
ax.scatter(*_3DreconstructedBoard.T, color='b', marker='x', alpha=1, s=20)
print('pixelsPointsLeft:{}'.format(np.shape(self.pixelsPointsLeft)))
print('pixelsPointsRight:{}'.format(np.shape(self.pixelsPointsRight)))
print('_3DreconstructedBoard:{}'.format(np.shape(_3DreconstructedBoard)))
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('closest_lidar_points', closest_lidar_points)])
# save_obj(self.points_correspondences, self.name)
ax.legend(handles=legend_elements, loc='best')
plt.show()
def getDepth_Inside_Outside(self):
calibrations = ['inside', 'outside']
output = []
for calib in calibrations:
camera_model = load_obj('{}_combined_camera_model'.format(calib))
camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(calib))
K_left = camera_model['K_right']
D_left = camera_model['D_right']
T = camera_model['T']
leftMapX, leftMapY = camera_model_rectify['leftMapX'], camera_model_rectify['leftMapY']
rightMapX, rightMapY = camera_model_rectify['rightMapX'], camera_model_rectify['rightMapY']
imgleft = cv2.imread(self.img_file)
imgright = cv2.imread(self.img_file2)
if stereoRectify:
imgleft = cv2.remap(src=imgleft, map1=leftMapX, map2=leftMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
imgright = cv2.remap(src=imgright, map1=rightMapX, map2=rightMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(imgleft, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(imgright, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_left and ret_right: # found chessboard
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
x_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
x_right = np.asarray(corners2_right).squeeze()
baseline = abs(T[0])
focal_length, cx, cy = K_left[0, 0], K_left[0, 2], K_left[1, 2]
disparity = np.sum(np.sqrt((x_left - x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
depth = (baseline * focal_length / disparity) # .reshape(10,7)
fxypxy = [K_left[0, 0], K_left[1, 1], cx, cy]
print('{} fx:{}, fy:{}'.format(calib, round(K_left[0, 0],2), round(K_left[1, 1],2)))
_3DPoints = []
for i, pixel in enumerate(x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = depth[i]
# print('u:{},v:{},distance:{}'.format(u,v, distance))
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - fxypxy[2]) / fxypxy[0]
pt[1] = pt[2] * (pt[1] - fxypxy[3]) / fxypxy[1]
_3DPoints.append(pt)
_3DPoints = np.array(_3DPoints)
output.append(_3DPoints)
else:
print('cannot detect board in both images')
if len(output)>1:
inside_3D = np.array(output[0]).squeeze()
outside_3D = np.array(output[1]).squeeze()
#get the error for each point
a_min_b = inside_3D - outside_3D
norm_total = np.linalg.norm(a_min_b)/70
norm_axis = np.linalg.norm(a_min_b, axis=0)/70
print('norm_total:{}, norm_axis:{}'.format(norm_total,norm_axis))
self._3DErros.append(norm_axis)
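# Illustrative note on the depth computation above: depth = baseline * fx / disparity,
# so with e.g. baseline = 0.5 m, fx = 1400 px and a disparity of 140 px the board would
# sit at 0.5 * 1400 / 140 = 5 m (numbers are made up, not taken from the calibration
# files). The division by 70 when averaging the inside/outside error presumably
# corresponds to the 10 x 7 = 70 chessboard corners.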
def fitNewPlan(self):
coolPoints = self.coolPoints
def minimum_bounding_rectangle(points):
pi2 = np.pi / 2.
# get the convex hull for the points
hull = ConvexHull(points)
hull_points = points[hull.vertices]
y_saved = []
for simplex in hull.simplices:
y = coolPoints[simplex,1]
x = points[simplex, 0]
z = points[simplex, 1]
self.ax.plot(x, y, z, 'k-', alpha = .5)
y_saved.append(y)
y_saved = np.array(y_saved)
# calculate edge angles
edges = hull_points[1:] - hull_points[:-1]
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
rotations = np.vstack([
np.cos(angles),np.cos(angles - pi2),
np.cos(angles + pi2),np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
rval = np.array(rval)
d_matrix = distance_matrix(rval, points)
neighbours = np.argsort(d_matrix, axis=1)[:, 0]
rval2 = np.asarray(coolPoints[neighbours, 1]).squeeze()
return rval, rval2
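# (Clarifying comment) minimum_bounding_rectangle follows the classic rotating-edges
# idea: take the convex hull of the 2D points, rotate the hull by every hull-edge
# angle, measure the axis-aligned box area in each rotated frame, keep the smallest
# one and rotate its corners back. The rectangle is computed in the x-z plane and each
# corner then borrows its height (y) from the closest measured board point.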
points = list(self.coolPoints[:, [0, -1]])
y = np.mean(self.coolPoints[:, 1])
c, c2 = minimum_bounding_rectangle(np.array(points))
self.corners_ = []
for i,point in enumerate(c):
#self.corners_.append([point[0],y, point[1]])
self.corners_.append([point[0],c2[i], point[1]])
if self.chessBoard==False and self.circle_center:
self.corners_.append([self.circle_center[0],y,self.circle_center[1]])
self.corners_ = np.array(self.corners_)
self.ax.scatter(*self.corners_.T, color='k', marker='x', alpha=1, s=50)
def fitCircle(self, points):
if len(points)>0:
def calc_R(x, y, xc, yc):
"""calculate the distance of each 2D points from the center (xc, yc)"""
return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
def f(c, x, y):
"""calculate the algebraic distance between the data points
and the mean circle centered at c=(xc, yc)"""
Ri = calc_R(x, y, *c)
return Ri - Ri.mean()
def sigma(coords, x, y, r):
"""Computes Sigma for circle fit."""
dx, dy, sum_ = 0., 0., 0.
for i in range(len(coords)):
dx = coords[i][1] - x
dy = coords[i][0] - y
sum_ += (sqrt(dx * dx + dy * dy) - r) ** 2
return sqrt(sum_ / len(coords))
def hyper_fit(coords, IterMax=99, verbose=False):
"""
Fits coords to circle using hyperfit algorithm.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : s, sigma - variance of data wrt solution (float)
"""
X, Y = None, None
if isinstance(coords, np.ndarray):
X = coords[:, 0]
Y = coords[:, 1]
elif isinstance(coords, list):
X = np.array([x[0] for x in coords])
Y = np.array([x[1] for x in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
n = X.shape[0]
Xi = X - X.mean()
Yi = Y - Y.mean()
Zi = Xi * Xi + Yi * Yi
# compute moments
Mxy = (Xi * Yi).sum() / n
Mxx = (Xi * Xi).sum() / n
Myy = (Yi * Yi).sum() / n
Mxz = (Xi * Zi).sum() / n
Myz = (Yi * Zi).sum() / n
Mzz = (Zi * Zi).sum() / n
# computing the coefficients of characteristic polynomial
Mz = Mxx + Myy
Cov_xy = Mxx * Myy - Mxy * Mxy
Var_z = Mzz - Mz * Mz
A2 = 4 * Cov_xy - 3 * Mz * Mz - Mzz
A1 = Var_z * Mz + 4. * Cov_xy * Mz - Mxz * Mxz - Myz * Myz
A0 = Mxz * (Mxz * Myy - Myz * Mxy) + Myz * (Myz * Mxx - Mxz * Mxy) - Var_z * Cov_xy
A22 = A2 + A2
# finding the root of the characteristic polynomial
y = A0
x = 0.
for i in range(IterMax):
Dy = A1 + x * (A22 + 16. * x * x)
xnew = x - y / Dy
if xnew == x or not np.isfinite(xnew):
break
ynew = A0 + xnew * (A1 + xnew * (A2 + 4. * xnew * xnew))
if abs(ynew) >= abs(y):
break
x, y = xnew, ynew
det = x * x - x * Mz + Cov_xy
Xcenter = (Mxz * (Myy - x) - Myz * Mxy) / det / 2.
Ycenter = (Myz * (Mxx - x) - Mxz * Mxy) / det / 2.
x = Xcenter + X.mean()
y = Ycenter + Y.mean()
r = sqrt(abs(Xcenter ** 2 + Ycenter ** 2 + Mz))
s = sigma(coords, x, y, r)
iter_ = i
if verbose:
print('Regression complete in {} iterations.'.format(iter_))
print('Sigma computed: ', s)
return x, y, r, s
def least_squares_circle(coords):
"""Circle fit using least-squares solver.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : sum of squared residuals of the fit against the data (float)
"""
x, y = None, None
if isinstance(coords, np.ndarray):
x = coords[:, 0]
y = coords[:, 1]
elif isinstance(coords, list):
x = np.array([point[0] for point in coords])
y = np.array([point[1] for point in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, _ = leastsq(f, center_estimate, args=(x, y))
xc, yc = center
Ri = calc_R(x, y, *center)
R = Ri.mean()
residu = np.sum((Ri - R) ** 2)
return xc, yc, R, residu
def plot_data_circle(x, y, xc, yc, R):
"""
Plot data and a fitted circle.
Inputs:
x : data, x values (array)
y : data, y values (array)
xc : fit circle center (x-value) (float)
yc : fit circle center (y-value) (float)
R : fit circle radius (float)
Output:
None (generates matplotlib plot).
"""
f = plt.figure(facecolor='white')
plt.axis('equal')
theta_fit = np.linspace(-pi, pi, 180)
x_fit = xc + R * np.cos(theta_fit)
y_fit = yc + R * np.sin(theta_fit)
plt.plot(x_fit, y_fit, 'b-', label="fitted circle", lw=2)
plt.plot([xc], [yc], 'bD', mec='y', mew=1)
plt.xlabel('x')
plt.ylabel('y')
# plot data
plt.scatter(x, y, c='red', label='data')
plt.legend(loc='best', labelspacing=0.1)
plt.grid()
plt.title('Fit Circle')
x1, y1, r1, resid1 = hyper_fit(points[:,[0,2]])
x2, y2, r2, resid2 = least_squares_circle(points[:,[0,2]])
#plot_data_circle(points[:,1], points[:,2],x,y,r)
if resid1>resid2:
x, y, r = x2, y2, r2
else:
x, y, r = x1, y1, r1
self.circle_center = (x, y)
self.circle_radius = r
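# Hedged sketch of what fitCircle does with synthetic data (hyper_fit and
# least_squares_circle are the nested helpers defined above, so this is meant to be
# read here, not called from outside the method):
# t = np.linspace(0, 2 * np.pi, 50)
# pts = np.c_[2.0 + 0.5 * np.cos(t), -1.0 + 0.5 * np.sin(t)]  # circle r=0.5 at (2,-1)
# x1, y1, r1, res1 = hyper_fit(pts)
# x2, y2, r2, res2 = least_squares_circle(pts)
# The fit with the smaller residual is kept as (circle_center, circle_radius).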
def getData(chess=True):
pcl_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/*.npy'.format('chess' if chess else 'charuco'))
imgleft_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/left/*.png'.format('chess' if chess else 'charuco'))
imgright_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/right/*.png'.format('chess' if chess else 'charuco'))
pcl_files.sort()
imgleft_files.sort()
imgright_files.sort()
GoodPoints,_3DErros, IMageNames = [],[],[]
for i, file in enumerate(pcl_files):
if globalTrigger:
print('work with {}'.format(file))
image_left = imgleft_files[i]
image_right = imgright_files[i]
filt = PointCloud_filter(file=file, img_file=image_left, img_file2=image_right, debug=False)
filt.setUp()
plt.show()
plt.close()
print('\n OK:{}, Save points_correspondences : {}'.format(filt.OK, np.shape(filt.points_correspondences)))
if filt.OK:
GoodPoints.append(filt.points_correspondences)
print('save data {} '.format(np.shape(GoodPoints)))
_3DErros.append(filt._3DErros)
IMageNames.append(os.path.basename(image_left))
else:
print('Close')
break
#save_obj(GoodPoints, 'GoodPoints2_{}'.format('chess' if chess else 'charuco'))
print('Data saved in GoodPoints')
showErros(_3DErros, IMageNames)
def euler_from_matrix(R):
beta = -np.arcsin(R[2, 0])
alpha = np.arctan2(R[2, 1] / np.cos(beta), R[2, 2] / np.cos(beta))
gamma = np.arctan2(R[1, 0] / np.cos(beta), R[0, 0] / np.cos(beta))
return np.array((alpha, beta, gamma))
def euler_matrix(theta):
R = np.array([[np.cos(theta[1]) * np.cos(theta[2]),
np.sin(theta[0]) * np.sin(theta[1]) * np.cos(theta[2]) - np.sin(theta[2]) * np.cos(theta[0]),
np.sin(theta[1]) * np.cos(theta[0]) * np.cos(theta[2]) + np.sin(theta[0]) * np.sin(
theta[2])],
[np.sin(theta[2]) * np.cos(theta[1]),
np.sin(theta[0]) * np.sin(theta[1]) * np.sin(theta[2]) + np.cos(theta[0]) * np.cos(theta[2]),
np.sin(theta[1]) * np.sin(theta[2]) * np.cos(theta[0]) - np.sin(theta[0]) * np.cos(
theta[2])],
[-np.sin(theta[1]), np.sin(theta[0]) * np.cos(theta[1]),
np.cos(theta[0]) * np.cos(theta[1])]])
return R
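# Minimal sanity-check sketch (added helper, not used by the calibration pipeline):
# away from the gimbal-lock singularity (beta = +-pi/2), euler_matrix and
# euler_from_matrix should invert each other. Assumes numpy is imported as np at the
# top of this file, as it is used throughout.
def _euler_round_trip_check(theta=(0.1, 0.35, -0.2)):
    R = euler_matrix(np.array(theta))
    recovered = euler_from_matrix(R)
    # recovered should match the input angles up to floating-point error
    return np.allclose(np.array(theta), recovered, atol=1e-9)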
class LiDAR_Camera_Calibration(object):
def __init__(self, file, chess = True, debug=True):
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * .1
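# (Clarifying note) self.objp is the 10 x 7 = 70 chessboard corner grid expressed in
# board coordinates with a square size of 0.1 (apparently metres); Z stays 0.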
self.debug = debug
self.file = file
self.chess = chess
if chess:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside',
'icp_finetuned_inside','closest_lidar_points','closest_lidar_points_inside',
'pixelsPoints','Camera_XYZ_Stereo','Camera_XYZ']
else:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside','pixelsPoints',
'Camera_XYZ_Stereo','closest_lidar_points']
self.readIntrinsics()
try:
self.load_points()
except:
print('cannot load data points')
'''self.Rotation = np.array([[ 0.94901505, 0.01681284, 0.3147821 ],
[-0.01003801, 0.99968204, -0.02313113],
[-0.31507091, 0.018792, 0.94888207]]).squeeze()
self.Translation = np.array([[-0.98078971],
[ 0.00600202],
[ 0.19497569]]).squeeze()
#self.Translation[0] = -.64
euler = euler_from_matrix(self.Rotation)
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.Rotation)
print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler[1] = np.deg2rad(22.598)
self.Rotation = euler_matrix(euler)'''
def rmse(self, objp, imgp, K, D, rvec, tvec):
print('objp:{}, imgp:{}'.format(np.shape(objp), np.shape(imgp)))
predicted, _ = cv2.projectPoints(objp, rvec, tvec, K, D)
print('rmse=====================================================')
print('predicted -> {}, type - >{}'.format(np.shape(predicted), type(predicted)))
predicted = cv2.undistortPoints(predicted, K, D, P=K)
predicted = predicted.squeeze()
pix_serr = []
for i in range(len(predicted)):
xp = predicted[i, 0]
yp = predicted[i, 1]
xo = imgp[i, 0]
yo = imgp[i, 1]
pix_serr.append((xp - xo) ** 2 + (yp - yo) ** 2)
ssum = sum(pix_serr)
return math.sqrt(ssum / len(pix_serr))
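# In other words, rmse() evaluates sqrt( (1/N) * sum_i ||predicted_i - observed_i||^2 )
# in pixels. Illustrative example: two points with reprojection errors of 1 px and 2 px
# give sqrt((1 + 4) / 2) ~ 1.58 px.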
def readIntrinsics(self):
name = 'inside'
name = 'outside'
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_right = self.camera_model['K_left']
self.K_left = self.camera_model['K_right']
self.D_right = self.camera_model['D_left']
self.D_left = self.camera_model['D_right']
print(' self.K_right')
print( self.K_right)
print(' self.K_left')
print(self.K_left)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.K = self.K_right
self.D = self.D_right
print('self T before {}'.format(np.shape(self.T)))
self.T = np.array([-0.96, 0., 0.12])[:, np.newaxis]
print('self T after {}'.format(np.shape(self.T)))
angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
self.R = euler_matrix(angles)
#-----------------------------------------------------
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#print(self.R)
print('translation is {}-----------------------------'.format(self.T))
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
#print('R1:{}'.format(R1))
#print('R2:{}'.format(R2))
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.R)
print('self.R: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R1)
#print('euler1->{}'.format(euler))
angles = euler_from_matrix(R1)
#print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R2)
#print('euler2->{}'.format(euler))
angles = euler_from_matrix(R2)
#print('rotation2: ', [(180.0 / math.pi) * i for i in angles])
self.R1 = R1
self.R2 = R2
self.P1 = P1
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
print('Got camera intrinsic')
print('Got camera-lidar extrinsics')
def load_points(self):
self.Lidar_3D, self.Image_2D,self.Image_2D2, self.Image_3D,self.Camera_XYZ = [],[],[],[],[]
with open(self.file, 'rb') as f:
self.dataPoinst = pickle.load(f, encoding='latin1')
#with open(self.file,'rb') as f:
#self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
#self.N = 1
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] #N x 3
#pixelsPoints = dictionary_data['pixelsPoints'] #N x 2
#StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] #N x 3
pixelsPointsLeft = dictionary_data['pixelsPointsLeft']
pixelsPointsRight = dictionary_data['pixelsPointsRight']
StereoCam_3D_points = dictionary_data['_3DreconstructedBoard'] #N x 3
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPointsLeft)
self.Image_2D2.append(pixelsPointsRight)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
#print('Cannot read data')
pass
#self.Lidar_3D = np.array(self.Lidar_3D).reshape(-1,3)
#self.Image_2D = np.array(self.Image_2D).reshape(-1,2)
#self.Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_2D:{}, Image_2D2:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),np.shape(self.Image_2D2),
np.shape(self.Image_3D)))
def plotData(self):
self.fig = plt.figure(figsize=plt.figaspect(0.33))
self.fig.tight_layout()
for i in range(self.N):
print('{}/{}'.format(i+1,self.N))
ax1 = self.fig.add_subplot(1, 3, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax2 = self.fig.add_subplot(1, 3, 2, projection='3d')
ax2.set_title('3D Stereo cameras')
ax2.set_xlabel('X', fontsize=8)
ax2.set_ylabel('Y', fontsize=8)
ax2.set_zlabel('Z', fontsize=8)
ax3 = self.fig.add_subplot(1, 3, 3, projection='3d')
ax3.set_title('2D pixels')
ax3.set_xlabel('X', fontsize=8)
ax3.set_ylabel('Y', fontsize=8)
ax3.set_zlabel('Z', fontsize=8)
_3d_LIDAR = np.array(self.Lidar_3D[i])
ax1.scatter(*_3d_LIDAR.T)
self.axisEqual3D(ax1, _3d_LIDAR)
_3d_cam = np.array(self.Image_3D[i])
ax2.scatter(*_3d_cam.T, c='r')
self.axisEqual3D(ax2,_3d_cam)
_2d_cam = np.array(self.Image_2D[i])
ax3.scatter(*_2d_cam.T, c='g')
self.axisEqual3D(ax3, _2d_cam)
plt.show()
def axisEqual3D(self,ax,data):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(data, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def get3D_3D_homography(self, src, dst): #both are Nx3 matrices
    src_mean = np.mean(src, axis=0)
    dst_mean = np.mean(dst, axis=0)
    # Compute the covariance of the centred point sets and align them with an SVD
    # (Python 3 rewrite of the original Python 2 reduce() version, which is no longer
    # valid syntax; the call site below unpacks H, R, T from this method)
    H = np.zeros((3, 3))
    for a, b in zip(src - src_mean, dst - dst_mean):
        H += np.outer(a, b)
    u, s, v = np.linalg.svd(H)
    R = v.T.dot(u.T)                      # Rotation
    T = - R.dot(src_mean) + dst_mean      # Translation
    H = np.hstack((R, T[:, np.newaxis]))  # 3 x 4 rigid transform
    return H, R.T, T
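# Hedged usage sketch of the 3x4 transform returned above (this mirrors how H is used
# later in calibrate_3D_3D_old, it is not an extra code path):
# H, R, T = self.get3D_3D_homography(src=Lidar_3D, dst=Image_3D)
# ones = np.ones((len(Lidar_3D), 1))
# lidar_h = np.hstack((Lidar_3D, ones))   # N x 4 homogeneous lidar points
# in_camera = lidar_h.dot(H.T)            # N x 3 points in the stereo-camera frame,
#                                         # equivalent to np.dot(H, lidar_h.T).T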
def calibrate_3D_3D_old(self):
print('3D-3D ========================================================================================')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_3D3D_{}.pkl'.format('chess')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format('chess')
self.Lidar_3D, self.Image_2D, self.Image_3D, self.Camera_XYZ = [], [], [], []
with open(file, 'rb') as f:
self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] # N x 3
pixelsPoints = dictionary_data['pixelsPoints'] # N x 2
StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] # N x 3
#StereoCam_3D_points = dictionary_data['point3D_trianguate']
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPoints)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
print('Cannot read data===================================================')
break
print('Lidar_3D:{}, Image_2D:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),
np.shape(self.Image_3D)))
Lidar_3D = np.array(self.Lidar_3D).reshape(-1, 3)
Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_3D:{}'.format(np.shape(Lidar_3D),np.shape(Image_3D)))
#-------------------------------------#-------------------------------------
c_, R_, t_ = self.estimate(Lidar_3D,Image_3D)
#import superpose3d as super
#(RMSD, R_, t_, c_) = super.Superpose3D(Lidar_3D, Image_3D)
#print('RMSD -> {}, t_{}, c_->{}'.format(RMSD, t_, c_))
# -------------------------------------#-------------------------------------
def similarity_transform(from_points, to_points):
assert len(from_points.shape) == 2, \
"from_points must be a m x n array"
assert from_points.shape == to_points.shape, \
"from_points and to_points must have the same shape"
N, m = from_points.shape
mean_from = from_points.mean(axis=0)
mean_to = to_points.mean(axis=0)
delta_from = from_points - mean_from # N x m
delta_to = to_points - mean_to # N x m
sigma_from = (delta_from * delta_from).sum(axis=1).mean()
sigma_to = (delta_to * delta_to).sum(axis=1).mean()
cov_matrix = delta_to.T.dot(delta_from) / N
U, d, V_t = np.linalg.svd(cov_matrix, full_matrices=True)
cov_rank = np.linalg.matrix_rank(cov_matrix)
S = np.eye(m)
if cov_rank >= m - 1 and np.linalg.det(cov_matrix) < 0:
S[m - 1, m - 1] = -1
elif cov_rank < m - 1:
raise ValueError("collinearity detected in covariance matrix:\n{}".format(cov_matrix))
R = U.dot(S).dot(V_t)
c = (d * S.diagonal()).sum() / sigma_from
t = mean_to - c * R.dot(mean_from)
print('R:{},t:{},c:{}'.format(R,t,c))
return c * R, t
print('similarity_transform===============================')
from_points = Lidar_3D
to_points = Image_3D
M_ans, t_ans = similarity_transform(from_points, to_points)
H, R, T = self.get3D_3D_homography(src = Lidar_3D, dst=Image_3D)
print('H:{}, R:{}, T:{}'.format(np.shape(H), np.shape(R), np.shape(T)))
print(H)
self.fig = plt.figure(figsize=plt.figaspect(1.))
ax1 = self.fig.add_subplot(1, 1, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax1.set_axis_off()
_3d_LIDAR = self.Lidar_3D[0]
ax1.scatter(*_3d_LIDAR.T, label = 'LiDAR')
_3d_Image = self.Image_3D[0]
ax1.scatter(*_3d_Image.T, s=25, label = 'Stereo Cam')
T = _3d_LIDAR.dot(c_ * R_) + t_
print('T -> {}'.format(np.shape(T)))
ax1.scatter(*T.T, marker='x', label='T')
d2 = distance_matrix(_3d_Image,_3d_Image)
print('d2:{}'.format(d2))
print('d2 shape :{}'.format(np.shape(d2)))
ones = np.ones(len(_3d_LIDAR))[:, np.newaxis]
transformed_ = np.hstack((_3d_LIDAR,ones))
transformed = np.dot(H, transformed_.T).T #transformation estimated with SVD
print(np.shape(transformed))
ax1.scatter(*transformed.T, s=25, label = 'ICP sol')
#ax1.set_axis_off()
primary = Lidar_3D# _3d_LIDAR
secondary = Image_3D# _3d_Image
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:, :-1]
X = pad(primary)
Y = pad(secondary)
# Solve the least squares problem X * A = Y # to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y, rcond=None)
transform = lambda x: unpad(np.dot(pad(x), A))
#print transform(primary)
print("Max error:", np.abs(secondary - transform(primary)).max())
trns2 = transform(_3d_LIDAR) #transformation estimated with LS
ax1.scatter(*trns2.T, label = 'least square sol')
to_points = M_ans.dot(_3d_LIDAR.T).T + t_ans
print('to_points ->{}'.format(np.shape(to_points)))
ax1.scatter(*to_points.T, label = 'to_points')
self.axisEqual3D(ax1, transformed)
ax1.legend()
plt.show()
#----------------------------------
if True:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/left/left_4.png')
img2 = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/right/right_4.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/cloud_4.npy'
else:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/left/left_4.png')
img2 = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/right/right_4.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/cloud_4.npy'
i = 12
l = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/left_{}.png'.format(i)
r = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/right_{}.png'.format(i)
#img, img2 = cv2.imread(l), cv2.imread(r)
#cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/cloud_{}.npy'.format(i)
if stereoRectify and True:
img = cv2.remap(src=img, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
img2 = cv2.remap(src=img2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
#Points in LiDAR frame
LiDAR_points3D = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3] #
print('LiDAR_points3D:{}'.format(np.shape(LiDAR_points3D)))
#converted in camera frame
ones = np.ones(len(LiDAR_points3D))[:, np.newaxis]
transformed_ = np.hstack((LiDAR_points3D, ones))
Camera_points3D = np.dot(H, transformed_.T).T
#Camera_points3D = transform(LiDAR_points3D)
#print('Camera_points3D:{}'.format(np.shape(Camera_points3D)))
#Camera_points3D = LiDAR_points3D.dot(c_ * R_) + t_
#Camera_points3D = LiDAR_points3D.dot(R_) + t_
#Camera_points3D = transform(LiDAR_points3D) #transformation estimated with LS
print('Camera_points3D -> {}'.format(Camera_points3D))
rvec, _ = cv2.Rodrigues(np.eye(3))
tvec = np.zeros(3)
#Camera_points3D = LiDAR_points3D#.dot(R_) + t_
#rvec = R_
#tran = t_
#tran[0] = -0.02
#tran[1] = -0.03
print('rvec -> {}, tvec->{}'.format(np.shape(rvec),np.shape(tvec)))
print('Camera_points3D -> {}'.format(np.shape(Camera_points3D)))
# Reproject back into the two cameras
rvec1, _ = cv2.Rodrigues(np.eye(3).T) # Change
rvec2, _ = cv2.Rodrigues(self.R.T) # Change
t1 = np.array([[0.], [0.], [0.]])
t2 = self.T
p1, _ = cv2.projectPoints(Camera_points3D[:, :3], rvec1, -t1, self.K, distCoeffs=self.D) # Change
p2, _ = cv2.projectPoints(Camera_points3D[:, :3], rvec2, -t2, self.K, distCoeffs=self.D) # Change
#points2D = [cv2.projectPoints(point, rvec, tvec, self.K, self.D)[0] for point in Camera_points3D[:, :3]]
points2D, _ = cv2.projectPoints(Camera_points3D[:, :3], np.identity(3), np.array([0., 0., 0.]), self.K, self.D)
points2D = np.asarray(points2D).squeeze()
points2D = np.asarray(p1).squeeze()
print('points2D:{}, img.shape[1]:{}'.format(np.shape(points2D), img.shape[1]))
inrange = np.where(
(points2D[:, 0] >= 0) &
(points2D[:, 1] >= 0) &
(points2D[:, 0] < img.shape[1]) &
(points2D[:, 1] < img.shape[0])
)
points2D = points2D[inrange[0]].round().astype('int')
# Draw the projected 2D points
for i in range(len(points2D)):
cv2.circle(img, tuple(points2D[i]), 2, (0, 255, 0), -1)
#cv2.circle(img2, tuple(points2D[i]), 2, (0, 255, 0), -1)
print('rvec -> {}, tvec->{}'.format(np.shape(rvec),np.shape(tvec)))
T_01 = np.vstack((np.hstack((np.eye(3), tvec[:,np.newaxis])), [0, 0, 0, 1])) # from lidar to right camera
T_12 = np.vstack((np.hstack((self.R, self.T)), [0, 0, 0, 1])) # between cameras
T_final = np.dot(T_01,T_12)
rotation, translation = T_final[:3, :3], T_final[:3, -1]
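# (Clarifying note) T_01 and T_12 are 4x4 homogeneous transforms; np.dot(T_01, T_12)
# chains them so that a column-vector point is mapped first by T_12 and then by T_01.
# The rotation and translation are then read back from the upper-left 3x3 block and
# the last column of the composed matrix.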
points2D = [cv2.projectPoints(point, rotation, translation, self.K, self.D)[0] for point in Camera_points3D[:, :3]]
points2D = np.asarray(points2D).squeeze()
points2D = np.asarray(p2).squeeze()
print('points2D:{}, img.shape[1]:{}'.format(np.shape(points2D), img.shape[1]))
inrange = np.where(
(points2D[:, 0] >= 0) &
(points2D[:, 1] >= 0) &
(points2D[:, 0] < img.shape[1]) &
(points2D[:, 1] < img.shape[0])
)
points2D = points2D[inrange[0]].round().astype('int')
# Draw the projected 2D points
for i in range(len(points2D)):
cv2.circle(img2, tuple(points2D[i]), 2, (0, 255, 0), -1)
cv2.imshow('left', cv2.resize(img,None, fx=.4, fy=.4))
cv2.imshow('right', cv2.resize(img2, None, fx=.4, fy=.4))
cv2.waitKey()
cv2.destroyAllWindows()
def drawCharuco(self, QueryImg):
points2D = np.array(self.Image_2D[0]).reshape(-1, 2)
for p in points2D:
cv2.circle(QueryImg, tuple(p), 4, (0, 0, 255), 5)
return QueryImg
def calibrate_3D_2D(self, userRansac = False):
points3D = np.array(self.Lidar_3D).reshape(-1, 3)
points2D = np.array(self.Image_2D).reshape(-1,2)
print('points3D:{}, points2D:{}'.format(np.shape(points3D),np.shape(points2D)))
# Estimate extrinsics
if userRansac:
success, rotation_vector, translation_vector, inliers = cv2.solvePnPRansac(points3D,
points2D, self.K, self.D,
flags=cv2.SOLVEPNP_ITERATIVE)
print('success:{},rotation_vector:{},translation_vector:{},inliers:{}'.format(success, np.shape(rotation_vector),
np.shape(translation_vector), np.shape(inliers)))
# Compute re-projection error.
points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
translation_vector, self.K, self.D)[0].squeeze(1)
error = (points2D_reproj - points2D)[inliers] # Compute error only over inliers.
error = np.asarray(error).squeeze()
print('points2D_reproj:{}, points2D:{},error:{}'.format(np.shape(points2D_reproj), np.shape(points2D), np.shape(error)))
rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
print('Re-projection error before LM refinement (RMSE) in px: ' + str(rmse))
# Refine estimate using LM
if not success:
print('Initial estimation unsuccessful, skipping refinement')
elif not hasattr(cv2, 'solvePnPRefineLM'):
print('solvePnPRefineLM requires OpenCV >= 4.1.1, skipping refinement')
else:
assert len(inliers) >= 3, 'LM refinement requires at least 3 inlier points'
rotation_vector, translation_vector = cv2.solvePnPRefineLM(points3D[inliers],
points2D[inliers], self.K, self.D,
rotation_vector, translation_vector)
# Compute re-projection error.
points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
translation_vector, self.K, self.D)[0].squeeze(1)
assert (points2D_reproj.shape == points2D.shape)
error = (points2D_reproj - points2D)[inliers] # Compute error only over inliers.
error = np.array(error).squeeze()
rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
print('Re-projection error after LM refinement (RMSE) in px: ' + str(rmse))
# Convert rotation vector
#from tf.transformations import euler_from_matrix
rotation_matrix = cv2.Rodrigues(rotation_vector)[0]
euler = euler_from_matrix(rotation_matrix)
# Save extrinsics
np.savez('extrinsics{}.npz'.format('chess' if self.chess else 'charuco'),euler=euler,Rodrigues=rotation_matrix, R=rotation_vector, T=translation_vector)
# Display results
print('Euler angles (RPY):', euler)
print('Rotation Matrix Rodrigues :', rotation_matrix)
print('rotation_vector:', rotation_vector)
print('Translation Offsets:', translation_vector)
points2D = cv2.projectPoints(points3D, rotation_vector, translation_vector, self.K, self.D)[0].squeeze(1)
print('========points3D:{}, points2D:{}=================================================='.format(np.shape(points3D),np.shape(points2D)))
else:
#-------------------------------------------------------------------------------------------------
imgp = np.array([points2D], dtype=np.float32).squeeze()
objp = np.array([points3D], dtype=np.float32).squeeze()
retval, rvec, tvec = cv2.solvePnP(objp, imgp, self.K, self.D, flags=cv2.SOLVEPNP_ITERATIVE)
rmat, jac = cv2.Rodrigues(rvec)
q = Quaternion(matrix=rmat)
print("Transform from camera to laser")
print("T = ")
print(tvec)
print("R = ")
print(rmat)
print("Quaternion = ")
print(q)
print("RMSE in pixel = %f" % self.rmse(objp, imgp, self.K, self.D, rvec, tvec))
result_file = 'solvePnP_extrinsics{}.npz'.format('chess' if self.chess else 'charuco')
with open(result_file, 'w') as f:
f.write("%f %f %f %f %f %f %f" % (q.x, q.y, q.z, q.w, tvec[0], tvec[1], tvec[2]))
print("Result output format: qx qy qz qw tx ty tz")
#refine results
print('refine results------------------------------------>')
rvec, tvec = cv2.solvePnPRefineLM(objp,imgp, self.K, self.D, rvec, tvec)
rmat, jac = cv2.Rodrigues(rvec)
q = Quaternion(matrix=rmat)
print("Transform from camera to laser")
print("T = ")
print(tvec)
print("R = ")
print(rmat)
print("Quaternion = ")
print(q)
print('Euler angles')
angles = euler_from_matrix(rmat)
print(angles)
print('euler angles ', [(180.0 / math.pi) * i for i in angles])
print("RMSE in pixel = %f" % self.rmse(objp, imgp, self.K, self.D, rvec, tvec))
result_file = 'refined_solvePnP_extrinsics{}.npz'.format('chess' if self.chess else 'charuco')
with open(result_file, 'w') as f:
f.write("%f %f %f %f %f %f %f" % (q.x, q.y, q.z, q.w, tvec[0], tvec[1], tvec[2]))
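# The self.rmse() helper used above is not shown in this excerpt; a minimal stand-alone
# equivalent (an assumption, not necessarily the original implementation) projects the
# 3D points with the estimated pose and reports the pixel RMSE:
def reprojection_rmse(objp, imgp, K, D, rvec, tvec):
    proj = cv2.projectPoints(objp, rvec, tvec, K, D)[0].squeeze(1)
    err = proj - imgp
    return np.sqrt(np.mean(err[:, 0] ** 2 + err[:, 1] ** 2))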
def get_z(self, T_cam_world, T_world_pc, K):
R = T_cam_world[:3, :3]
t = T_cam_world[:3, 3]
proj_mat = np.dot(K, np.hstack((R, t[:, np.newaxis])))
xyz_hom = np.hstack((T_world_pc, np.ones((T_world_pc.shape[0], 1))))
xy_hom = np.dot(proj_mat, xyz_hom.T).T
z = xy_hom[:, -1]
z = np.asarray(z).squeeze()
return z
def callback_solvePnP(self, img, cloud_file):
#init calibraiton
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/solvePnP_extrinsics{}.npz'.format(
'chess' if self.chess else 'charuco')
calib_file_ = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/combined_extrinsics{}.npz'
with open(calib_file, 'r') as f:
data = f.read().split()
#print('data:{}'.format(data))
qx = float(data[0])
qy = float(data[1])
qz = float(data[2])
qw = float(data[3])
tx = float(data[4])
ty = float(data[5])
tz = float(data[6])
q = Quaternion(qw, qx, qy, qz).transformation_matrix
q[0, 3] = tx
q[1, 3] = ty
q[2, 3] = tz
print("Extrinsic parameter - camera to laser")
print(q)
tvec = q[:3, 3]
rot_mat = q[:3, :3]
rvec, _ = cv2.Rodrigues(rot_mat)
try:
objPoints = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3]
print('objPoints:{}'.format(np.shape(objPoints)))
Z = self.get_z(q, objPoints, self.K)
objPoints = objPoints[Z > 0]
#print('objPoints:{}'.format(objPoints))
img_points, _ = cv2.projectPoints(objPoints, rvec, tvec, self.K, self.D)
img_points = np.squeeze(img_points)
for i in range(len(img_points)):
try:
cv2.circle(img, (int(round(img_points[i][0])), int(round(img_points[i][1]))), 3,
(0, 255, 0), 1)
except OverflowError:
continue
if self.chess:
cv2.drawChessboardCorners(img, (10, 7), np.array(self.Image_2D).reshape(-1,2), True)
else:
self.drawCharuco(img)
except:
print('callback_solvePnP - error')
image = cv2.resize(img, None, fx=.6, fy=.6)
return image
def callback_solvePnP_Ransac(self, img, cloud_file):
points3D = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3]
print('points3D:{}'.format(np.shape(points3D)))
file = np.load('extrinsics{}.npz'.format('chess' if self.chess else 'charuco'))
euler = np.array(file["euler"])
rotation_matrix = np.array(file["Rodrigues"])
rotation_vector = np.array(file["R"])
translation_vector = np.array(file["T"])
print('Euler angles (RPY):', euler)
print('Rotation Matrix Rodrigues :', rotation_matrix)
print('rotation_vector:', rotation_vector)
print('Translation Offsets:', translation_vector)
rvec = rotation_matrix
#rvec, _ = cv2.Rodrigues(rotation_matrix)
print('========points3D:{}=================================================='.format(
np.shape(points3D)))
#points2D = cv2.projectPoints(points3D, rotation_vector, translation_vector, self.K, self.D)[0].squeeze(1)
#print('points2D:{}'.format(np.shape(points2D)))
points2D = [cv2.projectPoints(point, rvec, translation_vector, self.K, self.D)[0] for point in points3D[:, :3]]
points2D = np.asarray(points2D).squeeze()
print('points2D:{}, img.shape[1]:{}'.format(np.shape(points2D),img.shape[1]))
inrange = np.where(
(points2D[:, 0] >= 0) &
(points2D[:, 1] >= 0) &
(points2D[:, 0] < img.shape[1]) &
(points2D[:, 1] < img.shape[0])
)
points2D = points2D[inrange[0]].round().astype('int')
# Draw the projected 2D points
for i in range(len(points2D)):
cv2.circle(img, tuple(points2D[i]), 2, (0, 255, 0), -1)
if self.chess:
cv2.drawChessboardCorners(img, (10, 7), np.array(self.Image_2D).reshape(-1,2), True)
else:
self.drawCharuco(img)
image = cv2.resize(img, None, fx=.6, fy=.6)
return image
def callback(self):
if self.chess:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/left/left_0.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/cloud_0.npy'
else:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/left/left_0.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/cloud_0.npy'
#img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/left/left_0.png')
#cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/cloud_0.npy'
#img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/left/left_0.png')
#cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/cloud_0.npy'
#solvePnP_Ransac_image = self.callback_solvePnP_Ransac(img=img.copy(),cloud_file=cloud_file)
cv2.imshow('solvePnP_Ransac', cv2.resize(img,None,fx=.4,fy=.4))
cv2.waitKey()
solvePnP_image = self.callback_solvePnP(img=img.copy(),cloud_file=cloud_file)
cv2.imshow('solvePnP', solvePnP_image)
cv2.waitKey()
cv2.destroyAllWindows()
def combine_both_boards_and_train(self):
#get data from chessboard
name = 'chess'
self.file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format(name)
self.load_points()
Lidar_3D, Image_2D, Image_3D = np.array(self.Lidar_3D).reshape(-1,3), np.array(self.Image_2D).reshape(-1,2), np.array(self.Image_3D).reshape(-1,3)
#get data from charuco
name = 'charuco'
self.file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format(name)
self.load_points()
Lidar_3D, Image_2D = np.vstack((Lidar_3D, np.array(self.Lidar_3D).reshape(-1,3))), np.vstack((Image_2D, np.array(self.Image_2D).reshape(-1,2)))
print('Lidar_3D:->{}, Image_2D:->{}'.format(np.shape(Lidar_3D), np.shape(Image_2D)))
imgp = np.array([Image_2D], dtype=np.float32).squeeze()
objp = np.array([Lidar_3D], dtype=np.float32).squeeze()
retval, rvec, tvec = cv2.solvePnP(objp, imgp, self.K, self.D, flags=cv2.SOLVEPNP_ITERATIVE)
print('tvec -> {}'.format(tvec.ravel()))
rmat, jac = cv2.Rodrigues(rvec)
q = Quaternion(matrix=rmat)
angles = euler_from_matrix(rmat)
print(angles)
print('euler angles ', [(180.0 / math.pi) * i for i in angles])
print("RMSE in pixel = %f" % self.rmse(objp, imgp, self.K, self.D, rvec, tvec))
result_file = 'combined_extrinsics{}.npz'
with open(result_file, 'w') as f:
f.write("%f %f %f %f %f %f %f" % (q.x, q.y, q.z, q.w, tvec[0], tvec[1], tvec[2]))
print('Combined calibration done!!!')
def computeTransformation(self):
i = 5
l = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/left_{}.png'.format(i)
r = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/right_{}.png'.format(i)
img1 = cv2.imread(l)
img2 = cv2.imread(r)
#sift = cv2.SIFT_create()
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i, (m, n) in enumerate(matches):
if m.distance < 0.8 * n.distance:
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
#F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
E, mask = cv2.findEssentialMat(pts1, pts2, self.K, cv2.RANSAC, 0.999, 1.0, None)
print(E)
points, R, t, mask = cv2.recoverPose(E, pts1, pts2, self.K)
print('R')
print(R)
angles = euler_from_matrix(R)
print('rotation angles: ', [(180.0 / math.pi) * i for i in angles])
print('t')
print(t)
for pt1, pt2 in zip(pts1, pts2):
color = tuple(np.random.randint(0, 255, 3).tolist())
img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
cv2.imshow('imgL', cv2.resize(img1, None, fx=.4, fy=.4))
cv2.imshow('imgR', cv2.resize(img2, None, fx=.4, fy=.4))
cv2.waitKey(0)
cv2.destroyAllWindows()
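# Note (sketch, not original code): recoverPose() only determines the translation up to
# scale, so t has unit norm; to compare it against the calibrated stereo baseline self.T
# it has to be rescaled. Assembling the recovered relative pose as a 4x4 matrix:
def relative_pose(R, t, baseline=1.0):
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = baseline * t.ravel() / np.linalg.norm(t)  # rescale the unit-norm translation
    return T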
def write_ply(self, fn, verts, colors):
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
out_colors = colors.copy()
verts = verts.reshape(-1, 3)
verts = np.hstack([verts, out_colors])
with open(fn, 'wb') as f:
f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8'))
np.savetxt(f, verts, fmt='%f %f %f %d %d %d ')
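# Minimal usage sketch for write_ply() with made-up data: 100 random points coloured
# mid-grey, written to 'example.ply' for inspection in any point-cloud viewer.
# pts = np.random.rand(100, 3).astype(np.float32)
# cols = np.full((100, 3), 128, dtype=np.uint8)
# self.write_ply('example.ply', pts, cols)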
def view(self):
import glob
import open3d
file = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/*.ply')
pcda = []
for i, file_path in enumerate(file):
print("{} Load a ply point cloud, print it, and render it".format(file_path))
pcd = open3d.io.read_point_cloud(file_path)
pcda.append(pcd)
open3d.visualization.draw_geometries([pcd])
#o3d.visualization.draw_geometries([pcda[1], pcda[-1]])
def reproject_on_3D(self, useUnique = True):
def readCalibrationExtrinsic():
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/solvePnP_extrinsics{}.npz'.format(
'chess' if self.chess else 'charuco')
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/combined_extrinsics{}.npz'
with open(calib_file, 'r') as f:
data = f.read().split()
#print('data:{}'.format(data))
qx = float(data[0])
qy = float(data[1])
qz = float(data[2])
qw = float(data[3])
tx = float(data[4])
ty = float(data[5])
tz = float(data[6])
q = Quaternion(qw, qx, qy, qz).transformation_matrix
q[0, 3],q[1, 3],q[2, 3] = tx,ty,tz
tvec = q[:3, 3]
rot_mat = q[:3, :3]
#rvec, _ = cv2.Rodrigues(rot_mat)
rvec = rot_mat
print('tvec -> {}'.format(tvec))
return rvec, tvec, q
rvec, tvec, q = readCalibrationExtrinsic()
print(self.K)
print(self.D)
print(rvec)
print(tvec)
i=1
i=11
l = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/left_{}.png'.format(i)
r = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/right_{}.png'.format(i)
imgLeft, imgRight = cv2.imread(l),cv2.imread(r)
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/cloud_{}.npy'.format(i)
_3DPoints = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3]
#Left image--------------------------------------------------------------------------------------------
objPoints_left = _3DPoints.copy()
Z = self.get_z(q, objPoints_left, self.K)
objPoints_left = objPoints_left[Z > 0]
print('objPoints_left:{}'.format(np.shape(objPoints_left)))
points2D_left, _ = cv2.projectPoints(objPoints_left, rvec, tvec, self.K, self.D)
points2D_left = np.squeeze(points2D_left)
print('objPoints_left -> {}, points2D_left -> {}, '.format(np.shape(objPoints_left), np.shape(points2D_left)))
inrange_left = np.where((points2D_left[:, 0] > 0) & (points2D_left[:, 1] > 0) &
(points2D_left[:, 0] < imgLeft.shape[1]-1) & (points2D_left[:, 1] < imgLeft.shape[0]-1))
print('inrange_left : {}'.format(np.shape(inrange_left)))
points2D_left = points2D_left[inrange_left[0]].round().astype('int')
print('points2D_left:{}, '.format(np.shape(points2D_left)))
#Right image ----------------------------------------------------------------------------------------
objPoints_right = _3DPoints.copy()
Z = self.get_z(q, objPoints_right, self.K_left)
objPoints_right = objPoints_right[Z > 0]
T_01 = np.vstack((np.hstack((rvec, tvec[:, np.newaxis])), [0,0,0,1])) #from lidar to right camera
T_12 = np.vstack((np.hstack((self.R, self.T)), [0,0,0,1])) #between cameras
T_final = np.dot(T_12, T_01)
rotation, translation = T_final[:3,:3], T_final[:3,-1]
points2D_right, _ = cv2.projectPoints(objPoints_right, rotation, translation, self.K_left, self.D_left)
points2D_right = np.squeeze(points2D_right)
inrange_right = np.where((points2D_right[:, 0] >= 0) &(points2D_right[:, 1] >= 0) &
(points2D_right[:, 0] < imgRight.shape[1]-1) &(points2D_right[:, 1] < imgRight.shape[0]-1))
print('points2D_right init ->{}'.format(np.shape(points2D_right)))
points2D_right = points2D_right[inrange_right[0]].round().astype('int')
print('points2D_right now ->{}'.format(np.shape(points2D_right)))
#columns=["X", "Y", "Z","intens","ring"]
colors = np.array(np.load(cloud_file, mmap_mode='r'))[:, 3] #
# Color map for the points
colors = colors[inrange_left[0]]
cmap = matplotlib.cm.get_cmap('hsv')
colors = cmap(colors / np.max(colors))
print('colors -> {}, min:{}, max:{}'.format(np.shape(colors), np.min(colors), np.max(colors)))
colorImageLeft,colorImageRight = imgLeft.copy(),imgRight.copy()
fig, axs = plt.subplots(1, 2)
fig.set_size_inches(20, 10.5, forward=True)
axs[0].imshow(imgLeft)
#axs[0].scatter(points2D_left[:,0],points2D_left[:,1], s=.1, c='green')
axs[0].scatter(points2D_left[:,0],points2D_left[:,1], s=.3, c=colors)
axs[0].set_title("Left image")
axs[1].set_title("Right image")
axs[1].imshow(imgRight)
#axs[1].scatter(points2D_right[:,0],points2D_right[:,1], s=.1, c='red')
# Color map for the points
colors = np.array(np.load(cloud_file, mmap_mode='r'))[:, 3] #
colors = colors[inrange_right[0]]
colors = cmap(colors / np.max(colors))
print('points2D_right->{}, colors->{}'.format(np.shape(points2D_right), np.shape(colors)))
axs[1].scatter(points2D_right[:,0],points2D_right[:,1], s=.1, c=colors)
fig.tight_layout()
plt.show()
points_left = objPoints_left[inrange_left[0]]
points_right = objPoints_right[inrange_right[0]]
print('points_left -> {}, colorImageLeft->{}'.format(np.shape(points_left), np.shape(colorImageLeft)))
print('points_right -> {}, colorImageRight->{}'.format(np.shape(points_right), np.shape(colorImageRight)))
colors_left = colorImageLeft[points2D_left[:, 1], points2D_left[:, 0], :]
colors_right = colorImageRight[points2D_right[:, 1], points2D_right[:, 0], :]
print('colors_left -> {}'.format(np.shape(colors_left)))
print('colors_right -> {}'.format(np.shape(colors_right)))
points = np.vstack((points_left,points_right))
color = np.vstack((colors_left,colors_right))
print('points->{}, color->{}'.format(np.shape(points), np.shape(color)))
#plt.show()
#self.write_ply('Lidar_cam.ply', points, color)
#self.view()
#plt.show()
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h * 6.0)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
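# Example use of hsv_to_rgb() (illustrative values): filterOcclusion() below feeds it a
# depth normalised to [0, 1], so the hue encodes distance at full saturation and value.
# r, g, b = hsv_to_rgb(0.25, 1.0, 1.0)
# colour_8bit = (int(255 * r), int(255 * g), int(255 * b))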
def filterOcclusion(data):
print('data -> {}'.format(np.shape(data)))
# ---create a pandas Dataframe with X,Y,Z
print('Create a DataFrame')
df = pd.DataFrame(data, columns=['X','Y','Z','X3D','Y3X','Z3D','R','G','B'])
# ---sort it ascend by Z
print('Sort by Z')
df = df.sort_values(by=['Z'],kind='quicksort')
print('Data point after sorting------------------------------')
#---For each point create rectangle centered in current point
xGap,yGap = 20, 50
xOffset, yOffset = int(xGap / 2), int(yGap / 2)
def create_rectange(x,y,depth):
bl = [x-xOffset, y+yOffset] #bottom left
tr = [x+xOffset, y-yOffset] #top right
return [bl,tr,depth]
print('Adding rectangles')
#Rectangles = np.array([create_rectange(x=row['X'],y=row['Y'], depth = row['Z']) for index, row in df.iterrows()])
vfunc = np.vectorize(create_rectange)
Rectangles = vfunc(df['X'].values, df['Y'].values, df['Z'].values)
df['Rectangles'] = Rectangles
#Rectangles = np.asarray(Rectangles.tolist())
#print('Rectangles -> {}'.format(np.shape(Rectangles)))
#bl,tr = np.asarray(Rectangles[:,0].tolist()),np.asarray(Rectangles[:,0].tolist())
# 'bl0 -> {}'.format(np.shape(bl), np.shape(tr))
#df['bl0'] = bl[:,0]
#df['bl1'] = bl[:, 1]
#df['tr0'] = tr[:, 0]
#df['tr1'] = tr[:, 1]
# For each point, project it if it does not belong in prev 5 points
t = .5
def lies_inside(bl, tr, p, dist): #bottom_left, top_right, poin, distance_left, distance_right
if (p[0] > bl[0] and p[0] < tr[0] and p[1] < bl[1] and p[1] > tr[1]):
if abs(p[-1]-dist)>t:
return True
else:
return False
else:
return False
def lies_inside_(bl0,bl1, tr0,tr1, p0,p1,p2, dist): #bottom_left, top_right, poin, distance_left, distance_right
if (p0 > bl0 and p0 < tr0 and p1 < bl1 and p1 > tr1):
if abs(p2-dist)>t:
return True
else:
return False
else:
return False
lies_inside_ = np.vectorize(lies_inside_)
occluded = np.zeros(len(df), dtype=bool)  # one flag per point
projected = np.zeros(len(df), dtype=bool)
df['occluded'] = occluded
df['projected'] = projected
idx = range(len(df))
df['idx'] = idx
df = df.set_index(['idx'])
# for each point check if the prev 5 points belongs to its rectangle -> if yes-> discard it
print('Compute neighbors')
from sklearn.neighbors import NearestNeighbors
X = np.array(df.iloc[:,0:2])
k=10
print('X -> {}'.format(np.shape(X)))
nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
print('distances -> {}, indices->{}, df->{}'.format(np.shape(distances), np.shape(indices), np.shape(df)))
df['nbrs_indices'] = indices[:,1:].tolist()
print(df.head())
import time
start = time.time()
print('Start projection')
def soc_iter(i):
print(i)
# take the neighbours that are already projected and not occluded
nbrs = df.iloc[i, -1]
prev_points = df.iloc[nbrs] # .query('projected == 1 & occluded == 0') #5.82813405991 s
condition = (prev_points.projected == True) & (prev_points.occluded == False)
prev_points = prev_points[condition] # time = 156.481780052 s
# print('nbrs -> {}, prev_points->{}, condition1->{}'.format(np.shape(nbrs), np.shape(prev_points), np.shape(condition)))
if len(prev_points) > 0:
p = np.array(df.iloc[i, 0:3]) # current_point
# time = 156.481780052 s
Rectangles = prev_points['Rectangles']
occlusion = [lies_inside(bl=point[0], tr=point[1], p=p, dist=point[-1]) for point in Rectangles]
# time = 156.481780052 s
#occlusion = lies_inside_(prev_points['bl0'].values, prev_points['bl1'].values, prev_points['tr0'].values, prev_points['tr1'].values, p[0], p[1], p[-1], prev_points['Z'].values)
if np.any(occlusion):
# print('point {} is occluded'.format(p))
df.loc[i, 'occluded'] = True
df.loc[i, 'projected'] = True
soc_iter_vect = np.vectorize(soc_iter)
N = len(df)
m = np.linspace(start=1, stop=N-1, num=N-1, dtype=int)
print('m->{}, N:{}'.format(np.shape(m),N))
soc_iter_vect(m) # uncomment this
'''for i in range(1,2): #len(df)
print i
# take the neighbours that are already projected and not occluded
nbrs = df.iloc[i, -1]
prev_points = df.iloc[nbrs]#.query('projected == 1 & occluded == 0') #5.82813405991 s
condition = (prev_points.projected == True) & (prev_points.occluded == False)
prev_points = prev_points[condition] #time = 156.481780052 s
#print('nbrs -> {}, prev_points->{}, condition1->{}'.format(np.shape(nbrs), np.shape(prev_points), np.shape(condition)))
if len(prev_points)>0:
p = np.array(df.iloc[i, 0:3]) #current_point
# time = 303.82229900
#occlusion = (p[0] > (prev_points.X-xOffset)) & (p[0] < (prev_points.X+xOffset)) & (p[1] < (prev_points.Y+yOffset)) & (p[1] > (prev_points.Y-yOffset)) & (abs(p[-1] - prev_points.Z) > .3)
#time = 156.481780052 s
Rectangles = prev_points['Rectangles']
occlusion = np.array([lies_inside(bl=point[0], tr=point[1], p=p, dist=point[-1]) for point in Rectangles])
if np.any(occlusion):
#print('point {} is occluded'.format(p))
df.loc[i,'occluded'] = True
df.loc[i, 'projected'] = True'''
#soc_iter_vect(1)
end = time.time()
print('the publish took {}'.format(end - start))
print(df.head())
Points = np.array(df[df['occluded']==False]).squeeze()
good_points = Points[:,0:2].astype('int')
distance = Points[:,2]
_3Dpoint = Points[:,3:6]
_3Dcolor = Points[:, 6:9]
MIN_DISTANCE, MAX_DISTANCE = np.min(distance), np.max(distance)
colours = (distance - MIN_DISTANCE) / (MAX_DISTANCE - MIN_DISTANCE)
colours = np.asarray([np.asarray(hsv_to_rgb( c, np.sqrt(1), 1.0)) for c in colours])
cols = 255 * colours
return good_points, cols,_3Dpoint, _3Dcolor
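# The per-neighbour rectangle test above can also be written with NumPy broadcasting;
# this sketch evaluates the same predicate (inside the neighbour's xGap x yGap rectangle
# and more than t behind it) against all already-projected neighbours at once. The
# function name and argument layout are illustrative, not part of the original code.
def occluded_by_any(p, nbr_xy, nbr_z, x_off=10, y_off=25, t=.5):
    inside = ((p[0] > nbr_xy[:, 0] - x_off) & (p[0] < nbr_xy[:, 0] + x_off) &
              (p[1] > nbr_xy[:, 1] - y_off) & (p[1] < nbr_xy[:, 1] + y_off))
    return bool(np.any(inside & (np.abs(p[2] - nbr_z) > t)))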
def filterOcclusion_(data):
print('data -> {}'.format(np.shape(data)))
# ---create a pandas Dataframe with X,Y,Z
print('Create a DataFrame')
df = pd.DataFrame(data, columns=['X','Y','Z','X3D','Y3X','Z3D','R','G','B'])
# ---sort it ascend by Z
print('Sort by Z')
df = df.sort_values(by=['Z'],kind='quicksort')
print('Data point after sorting------------------------------')
#---For each point create rectangle centered in current point
xGap,yGap = 20, 50
xOffset, yOffset = int(xGap / 2), int(yGap / 2)
def create_rectange(x,y,depth):
bl = [x-xOffset, y+yOffset] #bottom left
tr = [x+xOffset, y-yOffset] #top right
return [bl,tr,depth]
print('Adding rectangles')
#Rectangles = np.array([create_rectange(x=row['X'],y=row['Y'], depth = row['Z']) for index, row in df.iterrows()])
vfunc = np.vectorize(create_rectange)
Rectangles = vfunc(df['X'].values, df['Y'].values, df['Z'].values)
df['Rectangles'] = Rectangles
t = .5
def lies_inside(bl, tr, p, dist): #bottom_left, top_right, poin, distance_left, distance_right
if (p[0] > bl[0] and p[0] < tr[0] and p[1] < bl[1] and p[1] > tr[1]):
if abs(p[-1]-dist)>t:
return True
else:
return False
else:
return False
def lies_inside_(bl0,bl1, tr0,tr1, p0,p1,p2, dist): #bottom_left, top_right, poin, distance_left, distance_right
if (p0 > bl0 and p0 < tr0 and p1 < bl1 and p1 > tr1):
if abs(p2-dist)>t:
return True
else:
return False
else:
return False
lies_inside_ = np.vectorize(lies_inside_)
occluded = np.zeros(len(df), dtype=bool)  # one flag per point
projected = np.zeros(len(df), dtype=bool)
df['occluded'] = occluded
df['projected'] = projected
idx = range(len(df))
df['idx'] = idx
df = df.set_index(['idx'])
# for each point check if the prev 5 points belongs to its rectangle -> if yes-> discard it
print('Compute neighbors')
from sklearn.neighbors import NearestNeighbors
#X = np.array(df.iloc[:,0:2])
X = np.array(df.iloc[:, 1])
nbrs = NearestNeighbors(n_neighbors=3, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
print('distances -> {}, indices->{}, df->{}'.format(np.shape(distances), np.shape(indices), np.shape(df)))
df['nbrs_indices'] = indices[:,1:].tolist()
print(df.head())
import time
start = time.time()
print('Start projection')
def soc_iter(i):
print(i)
# take the neighbours that are already projected and not occluded
nbrs = df.iloc[i, -1]
prev_points = df.iloc[nbrs] # .query('projected == 1 & occluded == 0') #5.82813405991 s
condition = (prev_points.projected == True) & (prev_points.occluded == False)
prev_points = prev_points[condition] # time = 156.481780052 s
# print('nbrs -> {}, prev_points->{}, condition1->{}'.format(np.shape(nbrs), np.shape(prev_points), np.shape(condition)))
if len(prev_points) > 0:
p = np.array(df.iloc[i, 0:3]) # current_point
# time = 156.481780052 s
Rectangles = prev_points['Rectangles']
occlusion = [lies_inside(bl=point[0], tr=point[1], p=p, dist=point[-1]) for point in Rectangles]
# time = 156.481780052 s
#occlusion = lies_inside_(prev_points['bl0'].values, prev_points['bl1'].values, prev_points['tr0'].values, prev_points['tr1'].values, p[0], p[1], p[-1], prev_points['Z'].values)
if np.any(occlusion):
# print('point {} is occluded'.format(p))
df.loc[i, 'occluded'] = True
df.loc[i, 'projected'] = True
soc_iter_vect = np.vectorize(soc_iter)
N = len(df)
m = np.linspace(start=1, stop=N-1, num=N-1, dtype=int)
"""Tests of the homogeneity module"""
import unittest
import dcor
import numpy as np
class TestEnergyTest(unittest.TestCase):
"""Tests for the homogeneity energy test function."""
def test_same_distribution_same_parameters(self):
"""
Test that the test works on equal distributions.
As the distributions are the same, the test should not reject
the null hypothesis.
"""
vector_size = 10
num_samples = 100
mean = np.zeros(vector_size)
cov = np.eye(vector_size)
random_state = np.random.RandomState(0)
a = random_state.multivariate_normal(mean=mean,
cov=cov,
size=num_samples)
b = random_state.multivariate_normal(mean=mean,
cov=cov,
size=num_samples)
significance = 0.01
num_resamples = int(3 / significance + 1)
result = dcor.homogeneity.energy_test(
a, b, num_resamples=num_resamples, random_state=random_state)
self.assertGreater(result.p_value, significance)
def test_same_distribution_different_means(self):
"""
Test that the test works on distributions with different means.
As the distributions are not the same, the test should reject
the null hypothesis.
"""
vector_size = 10
num_samples = 100
mean_0 = np.zeros(vector_size)
mean_1 = np.ones(vector_size)
cov = np.eye(vector_size)
random_state = np.random.RandomState(0)
a = random_state.multivariate_normal(mean=mean_0, cov=cov,
size=num_samples)
b = random_state.multivariate_normal(mean=mean_1, cov=cov,
size=num_samples)
significance = 0.01
num_resamples = int(3 / significance + 1)
result = dcor.homogeneity.energy_test(
a, b, num_resamples=num_resamples, random_state=random_state)
self.assertLess(result.p_value, significance)
def test_same_distribution_different_covariances(self):
"""
Test that the test works on distributions with different covariance.
As the distributions are not the same, the test should reject
the null hypothesis.
"""
vector_size = 10
num_samples = 100
mean = np.zeros(vector_size)
cov_0 = np.eye(vector_size)
cov_1 = 3 * np.eye(vector_size)
random_state = np.random.RandomState(0)
a = random_state.multivariate_normal(mean=mean, cov=cov_0,
size=num_samples)
b = random_state.multivariate_normal(mean=mean, cov=cov_1,
size=num_samples)
significance = 0.01
num_resamples = int(3 / significance + 1)
result = dcor.homogeneity.energy_test(
a, b, num_resamples=num_resamples, random_state=random_state)
self.assertLess(result.p_value, significance)
def test_different_distributions(self):
"""
Test that the test works on different distributions.
As the distributions are not the same, the test should reject
the null hypothesis.
"""
num_samples = 100
random_state = np.random.RandomState(0)
a = random_state.standard_normal(size=(num_samples, 1))
b = random_state.standard_t(df=1, size=(num_samples, 1))
significance = 0.01
num_resamples = int(3 / significance + 1)
result = dcor.homogeneity.energy_test(
a, b, num_resamples=num_resamples, random_state=random_state)
self.assertLess(result.p_value, significance)
def test_different_means_median(self):
"""
Test that the test works on the same distribution with different means,
using the median average.
"""
num_samples = 100
random_state = np.random.RandomState(0)
a = random_state.normal(loc=0, size=(num_samples, 1))
b = random_state.normal(loc=1, size=(num_samples, 1))
significance = 0.01
num_resamples = int(3 / significance + 1)
median_result = dcor.homogeneity.energy_test(
a,
b,
num_resamples=num_resamples,
random_state=random_state,
average=np.median
)
mean_result = dcor.homogeneity.energy_test(
a,
b,
num_resamples=num_resamples,
random_state=random_state,
average=np.mean
)
# Check that we are actually using a different average
self.assertNotAlmostEqual(
mean_result.statistic,
median_result.statistic
)
# Check that we detected the heterogeneity
self.assertLess(median_result.p_value, significance)
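# For reference (not part of the original tests): the statistic being resampled here is
# the energy distance, E(X, Y) = 2*E||X - Y|| - E||X - X'|| - E||Y - Y'||, estimated from
# pairwise distances between and within the two samples; the `average` argument controls
# whether those pairwise distances are summarised with the mean (default) or the median.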
def test_different_distributions_median(self):
"""
Test that the test works on different distributions using the median.
"""
num_samples = 100
random_state = np.random.RandomState(0)
import gemmi
import numpy as np
import pytest
import reciprocalspaceship as rs
@pytest.mark.parametrize(
"sfs_phases",
[
(np.random.rand(10)
import numpy as np
import tensorflow as tf
import arff
from sklearn.metrics import matthews_corrcoef
from sklearn.model_selection import train_test_split
from sklearn import svm
from neuron import *
#this is the same util2 used for the first assignment
from util2 import Arff2Skl
import logging
import sys
# Set the logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class phishing_detector():
# Init the class
def __init__(self, features_selected):
# check only features_selected has values of 0s and 1s
if ~((features_selected == 0) | (features_selected == 1)).all():
logger.error("There are values other than 0 or 1")
raise ValueError("features_selected must contain only 0 or 1")
# check features_selected has len of 30
if len(features_selected) != 30:
logger.error("The input is not a vector of size 30")
raise ValueError("features_selected must be an array of 30 values")
# At least one of the values in features_selected must be 1
if ((features_selected == 0)).all():
logger.error("There must be at least one feature selected with 1")
raise ValueError("At least one of the values in features_selected must be 1")
# the input is the features to be selected
self.features_selected = features_selected
# get the data from the data set
dataset = arff.load(open('phishing.arff', 'r'))
data = np.array(dataset['data'])
import numpy as np
from numpy.linalg import matrix_power, pinv
from scipy.integrate import quad, solve_ivp
from scipy.linalg import inv
from scipy.special import factorial, binom
from tqdm import tqdm
from functools import lru_cache
from typing import List, Callable, Union, Tuple
from copy import copy
from sympy import Symbol, ImmutableMatrix
from . import helpers_reservoir as hr
from . import picklegzip
from . TimeStepIterator import TimeStep, TimeStepIterator
class DMRError(Exception):
"""Generic error occurring in this module."""
pass
##############################################################################
class DiscreteModelRun():
def __init__(self, times, Bs, xs):
"""
Bs State transition operators for one time step
"""
self.times = times
self.Bs = Bs
self.xs = xs
def acc_net_internal_flux_matrix(self):
Bs = self.Bs
xs = self.xs
return hr.net_Fs_from_discrete_Bs_and_xs(Bs, xs)
def acc_net_external_output_vector(self):
xs = self.xs
Bs = self.Bs
return hr.net_Rs_from_discrete_Bs_and_xs(Bs, xs)
def acc_net_external_input_vector(self):
xs = self.xs
Bs = self.Bs
return hr.net_Us_from_discrete_Bs_and_xs(Bs, xs)
@property
def start_values(self):
return self.xs[0, :]
@property
def nr_pools(self):
return len(self.start_values)
@classmethod
def from_Bs_and_net_Us(cls, start_values, times, Bs, net_Us):
"""
Bs State transition operators for one time step
"""
xs = cls._solve(start_values, Bs, net_Us)
return cls(times, Bs, xs)
@classmethod
def from_Bs_and_Us_2(cls, start_values, times, Bs, Us):
"""
Bs State transition operators for one time step
"""
xs = cls._solve_2(start_values, Bs, Us)
dmr = cls(times, Bs, xs)
dmr.Us = Us
return dmr
@classmethod
def from_fluxes(cls, start_values, times, net_Us, net_Fs, net_Rs):
Bs = cls.reconstruct_Bs_without_xs(
start_values,
net_Us,
net_Fs,
net_Rs
)
return cls.from_Bs_and_net_Us(
start_values,
times,
Bs,
net_Us
)
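# Sketch of the recursion behind _solve() (an assumption, since _solve is not shown in
# this excerpt): pool contents are propagated one step at a time with the discrete
# state-transition operator plus the net inputs of that step, x_{k+1} = B_k x_k + u_k:
#
# def _solve_sketch(start_values, Bs, net_Us):
#     xs = [np.asarray(start_values, dtype=float)]
#     for B, u in zip(Bs, net_Us):
#         xs.append(B @ xs[-1] + u)
#     return np.stack(xs)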
@classmethod
def from_fluxes_2(cls, start_values, times, Us, Fs, Rs):
Us[np.abs(Us)
import os
import warnings
warnings.filterwarnings("ignore")
import json
import csv
import numpy as np
from PIL import Image
from scipy.io import loadmat
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms as trn
from mit_semseg.utils import find_recursive
from mit_semseg.lib.utils import as_numpy
from mit_semseg.config import cfg
from sphericalpadding import *
from segm.model.utils import inference
from segm.model.factory import load_model
from pathlib import Path
import segm.utils.torch as ptu
from segm.data.utils import STATS
import torchvision.transforms.functional as F
import time
from segm.data.ade20k import ADE20K_CATS_PATH
from segm.data.utils import dataset_cat_description, seg_to_rgb
from scipy.ndimage import map_coordinates
class Sphegmenter:
def __init__(self, model_path='seg_tiny_mask/checkpoint.pth'):
self.batch_size = 6
self.CUDA=True
padding = 100
self.cube_pad = SpherePad(padding)
# Network Builders
ptu.set_gpu_mode(self.CUDA)
model_dir = Path(model_path).parent
self.model, self.variant = load_model(model_path)
self.model.to(ptu.device)
normalization_name = self.variant["dataset_kwargs"]["normalization"]
self.normalization = STATS[normalization_name]
self.device= torch.device("cuda" if torch.cuda.is_available() else "cpu")
def predict(self,image_path):
Im = Image.open(image_path).convert('RGB')
cube_size_ori = Im.width//4
output_w_ori = Im.width
output_h_ori = Im.height
cube_fov=90
c2e = Cube2Equirec(self.batch_size, cube_size_ori, output_h_ori, output_w_ori, cube_fov, self.CUDA)
e2c = Equirec2Cube(self.batch_size, output_h_ori, output_w_ori, cube_size_ori, cube_fov, self.CUDA)
self.merge = np.zeros((Im.height, Im.width, 3))
results=self.segment(Im,image_path,e2c,cube_size_ori)
torch.cuda.empty_cache()
im = np.roll(np.array(Im), Im.width//4, axis=1)
im=Image.fromarray(im)
cube_size_roll = Im.width//4
output_w_roll = Im.width
output_h_roll = Im.height
result_roll = self.segment(im,image_path,e2c,cube_size_roll)
starttime_=time.time()
new_maskss,images_names=compare_roll_ori(results,result_roll)
print('Comparing rolled and original time',time.time()-starttime_)
plot_contours(new_maskss,images_names)
print('plotting contours X,y to u,v conversion time',time.time()-starttime_)
return
def segment(self,Im,image_path,e2c,cube_size_roll):
ow, oh = Im.size
im = np.array(Im)
if self.CUDA:
im_tensor = Variable(torch.cuda.FloatTensor(im).unsqueeze(0).transpose(1, 3).transpose(2, 3))
else:
im_tensor = Variable(torch.FloatTensor(im).unsqueeze(0).transpose(1, 3).transpose(2, 3))
im_cube = e2c.ToCubeTensor(im_tensor)
padded_original = self.cube_pad(torch.stack([x.squeeze(0) for x in im_cube]))
padded_faces = [padded_original[i].permute(1,2,0).cpu().numpy().astype(np.uint8) for i in range(6)]
del padded_original
del im_cube
del im_tensor
image_name=image_path.split('/')[-1].split('.')[0]
pf_folder_name="padded_faces"+image_name
os.makedirs(pf_folder_name, exist_ok=True) # succeeds even if directory exists.
for i, side in enumerate(['back', 'bottom', 'front', 'left', 'right', 'top']):
Image.fromarray(padded_faces[i]).save(pf_folder_name+'/'+side+'.jpg')
if os.path.isdir(pf_folder_name):
imgs = find_recursive(pf_folder_name)
else:
imgs = [pf_folder_name]
assert len(imgs), "imgs should be a path to image (.jpg) or directory."
list_test = [{'fpath_img': x} for x in imgs]
results = {"probs":[],
"preds":[],
"rimgs":[],
"image_name":[]}
for img_path in imgs:
with torch.no_grad():
pil_im = Image.open(img_path).convert('RGB')
im_meta = dict(flip=False)
im = F.pil_to_tensor(pil_im).float() / 255
im = F.normalize(im, self.normalization["mean"], self.normalization["std"])
segSize=(im.shape[1],
im.shape[2])
im = im.pin_memory().to(ptu.device).unsqueeze(0)
pred_tmp = inference(self.model,[im],[im_meta],ori_shape=im.shape[2:4],window_size=self.variant["inference_kwargs"]["window_size"],window_stride=self.variant["inference_kwargs"]["window_stride"],batch_size=2)
cat_names, cat_colors = dataset_cat_description(ADE20K_CATS_PATH)
seg_map = pred_tmp.argmax(0, keepdim=True)
seg_rgb = seg_to_rgb(seg_map, cat_colors)
seg_rgb = (255 * seg_rgb.squeeze().cpu().numpy()).astype(np.uint8)
pred_tmp=pred_tmp.unsqueeze(0)
prob, pred = torch.max(pred_tmp[0].unsqueeze(0), dim=1)
pred=pred.squeeze(0)
visualize_result_optimal( (np.transpose(np.array(pil_im), (1, 0, 2)), img_path),as_numpy(pred.cpu()),cfg,results)
results['preds'].append( pred)
results['probs'].append( prob.squeeze(0))
results["image_name"].append(image_name)
seg_folder_name="seg_results"+image_path.split('/')[-1].split('.')[0]
os.makedirs(seg_folder_name, exist_ok=True)
for i, side in enumerate(["back", "bottom", "front", "left", "right", "top"]): #[3,2,4,0]
Image.fromarray((results['rimgs'][i][100:-100, 100:-100]*255).astype(np.uint8)).save(seg_folder_name+'/'+side+'.jpg')
return results
def visualize(self, pred):
pred = np.int32(pred)
pred_color = colorEncode(pred, colors).astype(np.uint8)
return pred_color
def combined_results(self, input_path, result, cube_size,output_w,output_h,c2e,padding=100, roll=False):
batch_size = 6
cube_fov = 90
CUDA = True
seg_cube = torch.stack([torch.from_numpy(np.array(result['rimgs'][i])[padding:-padding, padding:-padding]).permute(2, 0, 1).float().cuda() for i in range(6)])
im_equi_ori = c2e.ToEquirecTensor(seg_cube)
im_equi_ori = im_equi_ori.to(torch.uint8).transpose(1, 3).transpose(1, 2).cpu().numpy()[0]#.astype(np.uint8)
seg_probs = torch.stack([torch.cat(
[result['probs'][i][padding:-padding, padding:-padding].unsqueeze(2),
result['probs'][i][padding:-padding, padding:-padding].unsqueeze(2),
result['probs'][i][padding:-padding, padding:-padding].unsqueeze(2)],dim=2).permute(2, 0, 1)
for i in range(6)
]
)
prob_equi_ori = c2e.ToEquirecTensor(seg_probs)
prob_equi_ori = prob_equi_ori.transpose(1, 3).transpose(1, 2).data.cpu().numpy()[0].astype(np.uint8)
if roll:
im_equi_ori = np.roll(im_equi_ori, -output_w//4, axis=1)
prob_equi_ori = np.roll(prob_equi_ori, -output_w//4, axis=1)
return im_equi_ori,prob_equi_ori,None
colors = loadmat('data/color150.mat')['colors']
color_codes_dict={}
iter=1
for x in colors:
color_codes_dict[tuple(int(y) for y in x)]=iter
iter+=1
def pyfunc(x):
return color_codes_dict[tuple(x)]
vfunc = np.vectorize(pyfunc, signature='(3)->()')
def plot_contours(new_maskss,image_names):
images_names=image_names
ratio=1
n_classes=150
smooth_alpha = 0.0012
polygons=[]
class_uv=[]
back_g_faces=[]
for index,im_no in enumerate([3,2,4,0,1,5]):
img=Image.fromarray(new_maskss[im_no].astype(np.uint8))
img = np.array(img)
# translate 3d color vectors to scalar ints [0, ..., n_classes]
mask = vfunc(img)
h, w = mask.shape
back_g=np.zeros((h,w,3))
mask_classes=list(np.unique(mask))
for class_k in mask_classes:
class_k=int(class_k)
contours=[]
approxes=[]
new_mask = np.zeros_like(mask, dtype='uint8')
# get binary mask for current class
new_mask[mask == class_k] = 1
new_mask[mask != class_k] = 0
uvgrid = equirect_uvgrid(h, w)
contours, _ = cv2.findContours(new_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
epsilon = smooth_alpha * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
back_g=cv2.drawContours(back_g, cnt, -1, (255,255,255), 5)
uv_approx = []
for i in range(len(approx)):
y, x = approx[i][0]
u=uvgrid[x, y][0]
v=uvgrid[x, y][1]
if index>0 and index !=4 and index!=5:
uv_approx.append([(u+int(index*np.pi)).tolist(),v.tolist()])
elif index == 4:
uv_approx.append([(u+int(np.pi)).tolist(),v.tolist()])
elif index == 5:
uv_approx.append([(u+int(np.pi)).tolist(),v.tolist()])
else:
uv_approx.append([u.tolist(),v.tolist()])
polygons.append(uv_approx)
class_uv.append(class_k)
back_g_faces.append(back_g)
combined_imgae=np.hstack((back_g_faces[0],back_g_faces[1]))
plt.imshow(combined_imgae)
plt.show()
class_polygons_dict = {}
for x, y in zip(class_uv, polygons):
class_polygons_dict.setdefault(x, []).append(y)
######################################################## combine all the u,v points################################
json_dict = dict.fromkeys(['Image_name','Classes'])
json_dict['Image_name']=images_names
json_dict['Classes']=class_polygons_dict
with open('uv_polygons_face1_2offsetpi.json', 'w') as fp:
json.dump(json_dict, fp)
return fp
names = {}
with open('data/object150_info.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
names[int(row[0])] = row[5].split(";")[0]
def equirect_facetype(h, w):
'''
0F 1R 2B 3L 4U 5D
'''
tp = np.roll(np.arange(4).repeat(w // 4)[None, :].repeat(h, 0), 3 * w // 8, 1)
# Prepare ceil mask
mask = np.zeros((h, w // 4), dtype=bool)
idx = np.linspace(-np.pi, np.pi, w // 4) / 4
idx = h // 2 - np.round(np.arctan(np.cos(idx)) * h / np.pi).astype(int)
for i, j in enumerate(idx):
mask[:j, i] = 1
mask = np.roll(np.concatenate([mask] * 4, 1), 3 * w // 8, 1)
tp[mask] = 4
tp[np.flip(mask, 0)] = 5
return tp.astype(np.int32)
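# Quick usage sketch (illustrative sizes): for a 512x1024 equirectangular grid,
# equirect_facetype() returns a per-pixel cube-face index (0F 1R 2B 3L 4U 5D).
# face_map = equirect_facetype(512, 1024)   # int32 array of shape (512, 1024)
# n_top_pixels = int((face_map == 4).sum())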
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
optional_returns = optional_indices or return_counts
if ar.size == 0:
if not optional_returns:
ret = ar
else:
ret = (ar,)
if return_index:
ret += (np.empty(0, bool),)
if return_inverse:
ret += (np.empty(0, bool),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not optional_returns:
ret = aux[flag]
else:
ret = (aux[flag],)
if return_index:
ret += (perm[flag],)
if return_inverse:
iflag = np.cumsum(flag) - 1
inv_idx = np.empty(ar.shape, dtype=np.intp)
inv_idx[perm] = iflag
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
'''
Encoding Visual Attributes in Capsules for Explainable Medical Diagnoses (X-Caps)
Original Paper by <NAME>, <NAME>, and <NAME> (https://arxiv.org/abs/1909.05926)
Code written by: <NAME>
If you use significant portions of this code or the ideas from our paper, please cite it :)
If you have any questions, please email me at <EMAIL>.
This file is used for testing models. Please see the README for details about training.
'''
import csv
import os
import numpy as np
from PIL import Image
from sklearn.metrics import confusion_matrix
from keras import backend as K
K.set_image_data_format('channels_last')
from keras.preprocessing.image import ImageDataGenerator
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
from model_helper import compile_model
from load_nodule_data import get_pseudo_label, normalize_img
from capsule_layers import combine_images
from utils import safe_mkdir
def compute_within_one_acc(cmat):
eps = 1e-7
class_acc = []
for i, row in enumerate(cmat):
curr_acc = 0
try:
curr_acc += row[i-1]
except:
pass
try:
curr_acc += row[i]
except:
pass
try:
curr_acc += row[i+1]
except:
pass
class_acc.append(curr_acc)
class_acc = np.asarray(class_acc)
return class_acc / (cmat.sum(axis=1)+eps), np.sum(class_acc) / cmat.sum()
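# Usage sketch with made-up labels: "within-one" accuracy counts a prediction as correct
# if it lands in the true class or an adjacent one, a common relaxed metric for ordinal
# malignancy scores.
# cmat = confusion_matrix([0, 1, 2, 3, 4], [0, 2, 2, 2, 3], labels=range(5))
# per_class_acc, overall_acc = compute_within_one_acc(cmat)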
def test(args, u_model, test_samples):
out_dir = os.path.join(args.data_root_dir, 'results', args.exp_name, args.net)
safe_mkdir(out_dir)
out_img_dir = os.path.join(out_dir, 'recons')
safe_mkdir(out_img_dir)
# Compile the loaded model
model = compile_model(args=args, uncomp_model=u_model)
# Load testing weights
if args.test_weights_path != '':
output_filename = os.path.join(out_dir, 'results_' + os.path.basename(args.test_weights_path)[:-5] + '.csv')
try:
model.load_weights(args.test_weights_path)
except Exception as e:
print(e)
raise Exception('Failed to load weights from training.')
else:
output_filename = os.path.join(out_dir, 'results_' + args.output_name + '_model_' + args.time + '.csv')
try:
model.load_weights(os.path.join(args.check_dir, args.output_name + '_model_' + args.time + '.hdf5'))
except Exception as e:
print(e)
raise Exception('Failed to load weights from training.')
test_datagen = ImageDataGenerator(
samplewise_center=False,
samplewise_std_normalization=False,
rescale=None)
# TESTING SECTION
def data_gen(gen):
while True:
x, y = gen.next()
yield x, y
x_test = normalize_img(np.expand_dims(test_samples[0], axis=-1).astype(np.float32))
if args.num_classes == 1:
y_test = np.rint(test_samples[2][:,-2])
##
# \file stack.py
# \brief { item_description }
#
# \author <NAME> (<EMAIL>)
# \date September 2015
#
import os
import re
import numpy as np
import SimpleITK as sitk
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
import simplereg.resampler
import niftymic.base.slice as sl
import niftymic.base.exceptions as exceptions
import niftymic.base.data_writer as dw
from niftymic.definitions import ALLOWED_EXTENSIONS, VIEWER
##
# In addition to the nifti-image (stored as sitk.Image object) this class Stack
# also contains additional variables helpful to work with the data.
#
class Stack:
def __init__(self):
self._is_unity_mask = True
self._deleted_slices = []
self._history_affine_transforms = []
self._history_motion_corrections = []
##
# Create Stack instance from file and add corresponding mask. Mask is
# either provided in the directory or created as binary mask consisting of
# ones.
# \param[in] dir_input string to input directory of nifti-file to read
# \param[in] filename string of nifti-file to read
# \param[in] suffix_mask extension of stack filename which indicates
# associated mask
# \return Stack object including its slices with corresponding masks
#
@classmethod
def from_filename(cls,
file_path,
file_path_mask=None,
extract_slices=True,
verbose=False,
slice_thickness=None,
):
stack = cls()
if not ph.file_exists(file_path):
raise exceptions.FileNotExistent(file_path)
path_to_directory = os.path.dirname(file_path)
# Strip extension from filename and remove potentially included "."
filename = [re.sub("." + ext, "", os.path.basename(file_path))
for ext in ALLOWED_EXTENSIONS
if file_path.endswith(ext)][0]
# filename = filename.replace(".", "p")
stack._dir = os.path.dirname(file_path)
stack._filename = filename
# Append stacks as SimpleITK and ITK Image objects
stack.sitk = sitkh.read_nifti_image_sitk(file_path, sitk.sitkFloat64)
stack.itk = sitkh.get_itk_from_sitk_image(stack.sitk)
# Set slice thickness of acquisition
if slice_thickness is None:
stack._slice_thickness = stack.sitk.GetSpacing()[-1]
else:
stack._slice_thickness = slice_thickness
# Append masks (either provided or binary mask)
if file_path_mask is None:
stack.sitk_mask = stack._generate_identity_mask()
if verbose:
ph.print_info(
"Identity mask created for '%s'." % (file_path))
else:
if not ph.file_exists(file_path_mask):
raise exceptions.FileNotExistent(file_path_mask)
stack.sitk_mask = sitkh.read_nifti_image_sitk(
file_path_mask, sitk.sitkUInt8)
try:
# ensure masks occupy same physical space
stack.sitk_mask.CopyInformation(stack.sitk)
except RuntimeError as e:
raise IOError(
"Given image and its mask do not occupy the same space: %s" %
e.message)
stack._is_unity_mask = False
# Check that binary mask is provided
nda_mask = sitk.GetArrayFromImage(stack.sitk_mask)
if nda_mask.max() > 1:
raise ValueError(
"Mask values > 1 encountered in '%s'. "
"Only binary masks are allowed." % file_path_mask)
# Append itk object
stack.itk_mask = sitkh.get_itk_from_sitk_image(stack.sitk_mask)
# Store current affine transform of image
stack._affine_transform_sitk = sitkh.get_sitk_affine_transform_from_sitk_image(
stack.sitk)
# Prepare history of affine transforms, i.e. encoded spatial
# position+orientation of stack, and motion estimates of stack
# obtained in the course of the registration/reconstruction process
stack._history_affine_transforms = []
stack._history_affine_transforms.append(stack._affine_transform_sitk)
stack._history_motion_corrections = []
stack._history_motion_corrections.append(sitk.Euler3DTransform())
# Extract all slices and their masks from the stack and store them
if extract_slices:
dimenson = stack.sitk.GetDimension()
if dimenson == 3:
stack._N_slices = stack.sitk.GetSize()[-1]
stack._slices = stack._extract_slices(
slice_thickness=stack.get_slice_thickness())
elif dimenson == 2:
stack._N_slices = 1
stack._slices = [stack.sitk[:, :]]
else:
stack._N_slices = 0
stack._slices = None
if verbose:
ph.print_info(
"Stack (image + mask) associated to '%s' successfully read." %
(file_path))
return stack
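# Typical call (hypothetical paths): read a stack together with its mask and split it
# into slices; the slice thickness defaults to the through-plane voxel spacing.
# stack = Stack.from_filename(
#     "/tmp/fetal_brain.nii.gz",
#     file_path_mask="/tmp/fetal_brain_mask.nii.gz",
#     extract_slices=True,
# )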
##
# Create Stack instance from stack slices in specified directory and add
# corresponding mask.
# \date 2017-08-15 19:18:56+0100
#
# \param cls The cls
# \param[in] dir_input string to input directory where bundle
# of slices are stored
# \param[in] prefix_stack prefix indicating the corresponding
# stack
# \param[in] suffix_mask extension of stack filename which
# indicates associated mask
# \param dic_slice_filenames Dictionary linking slice number (int)
# with filename (without extension)
# \param prefix_slice The prefix slice
#
# \return Stack object including its slices with corresponding masks
# \example mask (suffix_mask) of slice j of stack i (prefix_stack)
# reads: i_slicej_mask.nii.gz
#
# TODO: Code cleaning
@classmethod
def from_slice_filenames(cls,
dir_input,
prefix_stack,
suffix_mask=None,
dic_slice_filenames=None,
prefix_slice="_slice",
slice_thickness=None,
):
stack = cls()
if dir_input[-1] != "/":
dir_input += "/"
stack._dir = dir_input
stack._filename = prefix_stack
# Get 3D images
stack.sitk = sitkh.read_nifti_image_sitk(
dir_input + prefix_stack + ".nii.gz", sitk.sitkFloat64)
stack.itk = sitkh.get_itk_from_sitk_image(stack.sitk)
# Store current affine transform of image
stack._affine_transform_sitk = sitkh.get_sitk_affine_transform_from_sitk_image(
stack.sitk)
# Prepare history of affine transforms, i.e. encoded spatial
# position+orientation of stack, and motion estimates of stack
# obtained in the course of the registration/reconstruction process
stack._history_affine_transforms = []
stack._history_affine_transforms.append(stack._affine_transform_sitk)
stack._history_motion_corrections = []
stack._history_motion_corrections.append(sitk.Euler3DTransform())
# Set slice thickness of acquisition
if slice_thickness is None:
stack._slice_thickness = float(stack.sitk.GetSpacing()[-1])
else:
stack._slice_thickness = float(slice_thickness)
# Append masks (either provided or binary mask)
if suffix_mask is not None and \
os.path.isfile(dir_input +
prefix_stack + suffix_mask + ".nii.gz"):
stack.sitk_mask = sitkh.read_nifti_image_sitk(
dir_input + prefix_stack + suffix_mask + ".nii.gz",
sitk.sitkUInt8)
stack.itk_mask = sitkh.get_itk_from_sitk_image(stack.sitk_mask)
stack._is_unity_mask = False
else:
stack.sitk_mask = stack._generate_identity_mask()
stack.itk_mask = sitkh.get_itk_from_sitk_image(stack.sitk_mask)
stack._is_unity_mask = True
# Get slices
if dic_slice_filenames is None:
stack._N_slices = stack.sitk.GetDepth()
stack._slices = [None] * stack._N_slices
# Append slices as Slice objects
for i in range(0, stack._N_slices):
path_to_slice = os.path.join(
dir_input,
prefix_stack + prefix_slice + str(i) + ".nii.gz")
path_to_slice_mask = os.path.join(
dir_input,
prefix_stack + prefix_slice + str(i) + suffix_mask + ".nii.gz")
if ph.file_exists(path_to_slice_mask):
stack._slices[i] = sl.Slice.from_filename(
file_path=path_to_slice,
slice_number=i,
file_path_mask=path_to_slice_mask)
else:
stack._slices[i] = sl.Slice.from_filename(
file_path=path_to_slice,
slice_number=i)
else:
slice_numbers = sorted(dic_slice_filenames.keys())
stack._N_slices = len(slice_numbers)
stack._slices = [None] * stack._N_slices
for i, slice_number in enumerate(slice_numbers):
path_to_slice = os.path.join(
dir_input,
dic_slice_filenames[slice_number] + ".nii.gz")
path_to_slice_mask = os.path.join(
dir_input, dic_slice_filenames[slice_number] + suffix_mask + ".nii.gz")
if ph.file_exists(path_to_slice_mask):
stack._slices[i] = sl.Slice.from_filename(
file_path=path_to_slice,
slice_number=slice_number,
file_path_mask=path_to_slice_mask,
slice_thickness=stack.get_slice_thickness(),
)
else:
stack._slices[i] = sl.Slice.from_filename(
file_path=path_to_slice,
slice_number=slice_number,
slice_thickness=stack.get_slice_thickness(),
)
return stack
# Create Stack instance from an existing sitk.Image instance. Slices are
# not extracted and stored separately in the object. The idea is to use
# this function when the stack is regarded as an entire volume (like the
# reconstructed HR volume).
# \param[in] image_sitk sitk.Image created from nifti-file
# \param[in] name string containing the chosen name for the stack
# \param[in] image_sitk_mask associated mask of stack, sitk.Image object (optional)
# \return Stack object without slice information
@classmethod
def from_sitk_image(cls,
image_sitk,
slice_thickness,
filename="unknown",
image_sitk_mask=None,
extract_slices=True,
slice_numbers=None,
):
stack = cls()
# Explicit cast (+ creation of other image instance)
stack.sitk = sitk.Cast(image_sitk, sitk.sitkFloat64)
stack.itk = sitkh.get_itk_from_sitk_image(stack.sitk)
# Set slice thickness of acquisition
if not ph.is_float(slice_thickness):
raise ValueError("Slice thickness must be of type float")
stack._slice_thickness = float(slice_thickness)
stack._filename = filename
stack._dir = None
# Append masks (if provided)
if image_sitk_mask is not None:
stack.sitk_mask = sitk.Cast(image_sitk_mask, sitk.sitkUInt8)
try:
# ensure mask occupies the same physical space
stack.sitk_mask.CopyInformation(stack.sitk)
except RuntimeError as e:
raise IOError(
"Given image and its mask do not occupy the same space: %s" %
e.message)
stack.itk_mask = sitkh.get_itk_from_sitk_image(stack.sitk_mask)
if sitk.GetArrayFromImage(stack.sitk_mask).prod() == 1:
stack._is_unity_mask = True
else:
stack._is_unity_mask = False
else:
stack.sitk_mask = stack._generate_identity_mask()
stack.itk_mask = sitkh.get_itk_from_sitk_image(stack.sitk_mask)
stack._is_unity_mask = True
# Extract all slices and their masks from the stack and store them
if extract_slices:
stack._N_slices = stack.sitk.GetSize()[-1]
stack._slices = stack._extract_slices(
slice_numbers=slice_numbers,
slice_thickness=slice_thickness,
)
else:
stack._N_slices = 0
stack._slices = None
# Store current affine transform of image
stack._affine_transform_sitk = sitkh.get_sitk_affine_transform_from_sitk_image(
stack.sitk)
stack._history_affine_transforms = []
stack._history_affine_transforms.append(stack._affine_transform_sitk)
stack._history_motion_corrections = []
stack._history_motion_corrections.append(sitk.Euler3DTransform())
return stack
##
# Copy constructor
# \date 2019-01-15 16:55:09+0000
#
# \param cls The cls
# \param stack_to_copy Stack object to be copied
# \param filename The filename
#
# \return copied Stack object TODO: That's not really well done
#
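    # Hedged usage sketch (added for illustration; not part of the original
    # code, and `stack` is assumed to be an existing Stack instance):
    #
    #   stack_copy = Stack.from_stack(stack, filename=stack.get_filename() + "_copy")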
@classmethod
def from_stack(cls, stack_to_copy, filename=None):
stack = cls()
if not isinstance(stack_to_copy, Stack):
raise ValueError("Input must be of type Stack. Given: %s" %
type(stack_to_copy))
# Copy image stack and mask
stack.sitk = sitk.Image(stack_to_copy.sitk)
stack.itk = sitkh.get_itk_from_sitk_image(stack.sitk)
stack._slice_thickness = stack_to_copy.get_slice_thickness()
stack.sitk_mask = sitk.Image(stack_to_copy.sitk_mask)
stack.itk_mask = sitkh.get_itk_from_sitk_image(stack.sitk_mask)
stack._is_unity_mask = stack_to_copy.is_unity_mask()
if filename is None:
stack._filename = stack_to_copy.get_filename()
else:
stack._filename = filename
stack._dir = stack_to_copy.get_directory()
stack._deleted_slices = stack_to_copy.get_deleted_slice_numbers()
# Store current affine transform of image
stack.set_registration_history(
stack_to_copy.get_registration_history())
# Extract all slices and their masks from the stack and store them if
# given
if stack_to_copy.get_slices() is not None:
stack._N_slices = stack_to_copy.get_number_of_slices()
stack._slices = [None] * stack._N_slices
slices_to_copy = stack_to_copy.get_slices()
for j, slice_j in enumerate(slices_to_copy):
stack._slices[j] = sl.Slice.from_slice(slice_j)
else:
stack._N_slices = 0
stack._slices = None
return stack
# @classmethod
# def from_Stack(cls, class_instance):
# data = copy.deepcopy(class_instance) # if deepcopy is necessary
# return cls(data)
# def __deepcopy__(self, memo):
# print '__deepcopy__(%s)' % str(memo)
# return Stack(copy.deepcopy(memo))
# def copy(self):
# return copy.deepcopy(self)
# Get all slices of current stack
    # \return list of Slice objects representing the slices in 3D space
def get_slices(self):
if self._slices is None:
return None
else:
return [s for s in self._slices if s is not None]
##
# Get one particular slice of current stack
# \date 2018-04-18 22:06:38-0600
#
# \param self The object
# \param index slice index as integer
#
# \return requested 3D slice of stack as Slice object
#
def get_slice(self, index):
index = int(index)
if abs(index) > self._N_slices - 1:
raise ValueError(
"Enter a valid index between -%s and %s. Tried: %s" %
(self._N_slices - 1, self._N_slices - 1, index))
return self._slices[index]
def get_slice_thickness(self):
return float(self._slice_thickness)
def get_inplane_resolution(self):
return float(self.sitk.GetSpacing()[0])
##
# Gets the deleted slice numbers, i.e. misregistered slice numbers detected
# by robust outlier algorithm. Indices refer to slice numbers within
# original stack
# \date 2018-07-08 23:06:24-0600
#
# \param self The object
#
# \return The deleted slice numbers as list of integers.
#
def get_deleted_slice_numbers(self):
return list(self._deleted_slices)
##
# Sets the slice.
# \date 2018-04-18 22:05:28-0600
#
# \param self The object
# \param slice slice as Slice object
# \param index slice index as integer
#
def set_slice(self, slice, index):
if not isinstance(slice, sl.Slice):
raise IOError("Input must be of type Slice")
index = int(index)
if abs(index) > self._N_slices - 1:
raise ValueError(
"Enter a valid index between -%s and %s. Tried: %s" %
(self._N_slices - 1, self._N_slices - 1, index))
self._slices[index] = slice
##
    # Delete the given slice from the stack
# \date 2017-12-01 00:38:56+0000
#
# Note that index refers to list index of slices (0 ... N_slices_current) whereas
# "deleted slice index" refers to actual slice number within original stack
#
# \param self The object
    # \param slice slice as Slice object to be deleted
#
def delete_slice(self, slice):
if not isinstance(slice, sl.Slice):
raise IOError("Input must be of type Slice")
# keep slice number (w.r.t. original stack)
self._deleted_slices.append(int(slice.get_slice_number()))
self._deleted_slices = sorted((self._deleted_slices))
# delete slice
index = self._slices.index(slice)
self._slices[index] = None
# Get name of directory where nifti was read from
    # \return string of directory where nifti was read from
# \bug Does not exist for all created instances! E.g. Stack.from_sitk_image
def get_directory(self):
return self._dir
def set_filename(self, filename):
self._filename = filename
slices = self.get_slices()
if slices is not None:
for s in slices:
s.set_filename(filename)
# Get filename of read/assigned nifti file (Stack.from_filename vs Stack.from_sitk_image)
# \return string of filename
def get_filename(self):
return self._filename
# Get history history of affine transforms, i.e. encoded spatial
# position+orientation of slice, and rigid motion estimates of slice
# obtained in the course of the registration/reconstruction process
# \return list of sitk.AffineTransform and sitk.Euler3DTransform objects
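    # Hedged usage sketch (added for illustration; not part of the original
    # code):
    #
    #   affine_transforms, motion_corrections = stack.get_registration_history()
    #   latest_motion_correction = motion_corrections[-1]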
def get_registration_history(self):
affine_transforms = list(self._history_affine_transforms)
motion_corrections = list(self._history_motion_corrections)
return affine_transforms, motion_corrections
def set_registration_history(self, registration_history):
affine_transform_sitk = registration_history[0][-1]
self._update_affine_transform(affine_transform_sitk)
self._history_affine_transforms = [a for a in registration_history[0]]
self._history_motion_corrections = [t for t in registration_history[1]]
# Get number of slices of stack
# \return number of slices of stack
def get_number_of_slices(self):
return len(self.get_slices())
def is_unity_mask(self):
return self._is_unity_mask
# Display stack with external viewer (ITK-Snap)
    # \param[in] show_segmentation display stack with or without associated segmentation (default=0)
def show(self, show_segmentation=0, label=None, viewer=VIEWER, verbose=True):
if label is None:
label = self._filename
if show_segmentation:
sitk_mask = self.sitk_mask
else:
sitk_mask = None
sitkh.show_sitk_image(
self.sitk,
label=label,
segmentation=sitk_mask,
viewer=viewer,
verbose=verbose)
def show_slices(self):
sitkh.plot_stack_of_slices(
self.sitk, cmap="Greys_r", title=self.get_filename())
# Write information of Stack to HDD to given directory:
# - sitk.Image object as entire volume
# - each single slice with its associated spatial transformation (optional)
# \param[in] directory string specifying where the output will be written to (default="/tmp/")
# \param[in] filename string specifying the filename. If not given the assigned one within Stack will be chosen.
# \param[in] write_slices boolean indicating whether each Slice of the stack shall be written (default=False)
def write(self,
directory,
filename=None,
write_stack=True,
write_mask=False,
write_slices=False,
write_transforms=False,
suffix_mask="_mask",
write_transforms_history=False,
):
# Create directory if not existing
ph.create_directory(directory)
# Construct filename
if filename is None:
filename = self._filename
full_file_name = os.path.join(directory, filename)
# Write file to specified location
if write_stack:
dw.DataWriter.write_image(self.sitk, "%s.nii.gz" % full_file_name)
# Write mask to specified location if given
if self.sitk_mask is not None:
# nda = sitk.GetArrayFromImage(self.sitk_mask)
# Write mask if it does not consist of only ones
if not self._is_unity_mask and write_mask:
dw.DataWriter.write_mask(
self.sitk_mask, "%s%s.nii.gz" % (full_file_name, suffix_mask))
if write_transforms:
stack_transform_sitk = self._history_motion_corrections[-1]
sitk.WriteTransform(
stack_transform_sitk,
os.path.join(directory, self.get_filename() + ".tfm")
)
# Write each separate Slice of stack (if they exist)
if write_slices or write_transforms:
try:
# Check whether variable exists
# if 'self._slices' not in locals() or all(i is None for i in
# self._slices):
if not hasattr(self, '_slices'):
raise ValueError(
"Error occurred in attempt to write %s.nii.gz: "
"No separate slices of object Slice are found" %
full_file_name)
# Write slices
else:
if write_transforms and write_slices:
ph.print_info(
"Write %s image slices and slice transforms to %s ... " % (
self.get_filename(), directory),
newline=False)
elif write_transforms and not write_slices:
ph.print_info(
"Write %s slice transforms to %s ... " % (
self.get_filename(), directory),
newline=False)
else:
ph.print_info(
"Write %s image slices to %s ... " % (
self.get_filename(), directory),
newline=False)
for slice in self.get_slices():
slice.write(
directory=directory,
filename=filename,
write_transform=write_transforms,
write_slice=write_slices,
suffix_mask=suffix_mask,
write_transforms_history=write_transforms_history,
)
print("done")
except ValueError as err:
                print(err)
##
# Apply transform on stack and all its slices
# \date 2016-11-05 19:15:57+0000
#
# \param self The object
# \param affine_transform_sitk The affine transform sitk
#
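    # Hedged usage sketch (added for illustration; not part of the original
    # code; whether a rigid sitk.Euler3DTransform is accepted here depends on
    # the sitkh helper functions, so treat the argument type as an assumption):
    #
    #   motion_update_sitk = sitk.Euler3DTransform()
    #   stack.update_motion_correction(motion_update_sitk)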
def update_motion_correction(self, affine_transform_sitk):
# Update rigid motion estimate
current_rigid_motion_estimate = sitkh.get_composite_sitk_affine_transform(
affine_transform_sitk, self._history_motion_corrections[-1])
self._history_motion_corrections.append(current_rigid_motion_estimate)
# New affine transform of slice after rigid motion correction
affine_transform = sitkh.get_composite_sitk_affine_transform(
affine_transform_sitk, self._affine_transform_sitk)
# Update affine transform of stack, i.e. change image origin and
# direction in physical space
self._update_affine_transform(affine_transform)
# Update slices
if self.get_slices() is not None:
for i in range(0, self._N_slices):
self._slices[i].update_motion_correction(affine_transform_sitk)
##
# Apply transforms on all the slices of the stack. Stack itself
# is not getting transformed
# \date 2016-11-05 19:16:33+0000
#
# \param self The object
# \param affine_transforms_sitk List of sitk transform instances
#
def update_motion_correction_of_slices(self, affine_transforms_sitk):
        if isinstance(affine_transforms_sitk, (list, tuple, np.ndarray)) \
                and len(affine_transforms_sitk) == self._N_slices:
for i in range(0, self._N_slices):
self._slices[i].update_motion_correction(
affine_transforms_sitk[i])
else:
raise ValueError("Number of affine transforms does not match the "
"number of slices")
def _update_affine_transform(self, affine_transform_sitk):
# Ensure correct object type
self._affine_transform_sitk = sitk.AffineTransform(
affine_transform_sitk)
# Append transform to registration history
self._history_affine_transforms.append(affine_transform_sitk)
# Get origin and direction of transformed 3D slice given the new
# spatial transform
origin = sitkh.get_sitk_image_origin_from_sitk_affine_transform(
affine_transform_sitk, self.sitk)
direction = sitkh.get_sitk_image_direction_from_sitk_affine_transform(
affine_transform_sitk, self.sitk)
# Update image objects
self.sitk.SetOrigin(origin)
self.sitk.SetDirection(direction)
self.sitk_mask.SetOrigin(origin)
self.sitk_mask.SetDirection(direction)
self.itk.SetOrigin(origin)
self.itk.SetDirection(sitkh.get_itk_from_sitk_direction(direction))
self.itk_mask.SetOrigin(origin)
self.itk_mask.SetDirection(
sitkh.get_itk_from_sitk_direction(direction))
##
# Gets the resampled stack from slices.
# \date 2016-09-26 17:28:43+0100
#
    # After slice-based registrations slice j does not correspond to the
    # physical space of stack[:,:,j:j+1] anymore. With this method all
    # contained slices are resampled to the physical space defined by the
    # stack itself (or by a given resampling_grid). Overlapping slices get
    # averaged.
#
# \param self The object
# \param resampling_grid Define the space to which the stack of
# slices shall be resampled; given as Stack
# object
# \param interpolator The interpolator
#
# \return resampled stack based on current position of slices as Stack
# object
#
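    # Hedged usage sketch (added for illustration; not part of the original
    # code):
    #
    #   stack_mc = stack.get_resampled_stack_from_slices(interpolator="Linear")
    #   stack_mc.write("/tmp/", write_mask=True)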
def get_resampled_stack_from_slices(self, resampling_grid=None, interpolator="NearestNeighbor", default_pixel_value=0.0, filename=None):
# Choose interpolator
try:
interpolator_str = interpolator
interpolator = eval("sitk.sitk" + interpolator_str)
except:
raise ValueError("Error: interpolator is not known")
# Use resampling grid defined by original volumetric image
if resampling_grid is None:
resampling_grid = Stack.from_sitk_image(
image_sitk=self.sitk,
slice_thickness=self.get_slice_thickness(),
)
else:
# Use resampling grid defined by first slice (which might be
# shifted already)
if resampling_grid in ["on_first_slice"]:
stack_sitk = sitk.Image(self.sitk)
foo_sitk = sitk.Image(self._slices[0].sitk)
stack_sitk.SetDirection(foo_sitk.GetDirection())
stack_sitk.SetOrigin(foo_sitk.GetOrigin())
stack_sitk.SetSpacing(foo_sitk.GetSpacing())
                resampling_grid = Stack.from_sitk_image(
                    image_sitk=stack_sitk,
                    slice_thickness=stack_sitk.GetSpacing()[-1],
                )
# Use resampling grid defined by given sitk.Image
elif type(resampling_grid) is sitk.Image:
                resampling_grid = Stack.from_sitk_image(
                    image_sitk=resampling_grid,
                    slice_thickness=resampling_grid.GetSpacing()[-1],
                )
# Get shape of image data array
nda_shape = resampling_grid.sitk.GetSize()[::-1]
# Create zero image and its mask aligned with sitk.Image
nda = np.zeros(nda_shape)
stack_resampled_sitk = sitk.GetImageFromArray(nda)
stack_resampled_sitk.CopyInformation(resampling_grid.sitk)
stack_resampled_sitk = sitk.Cast(
stack_resampled_sitk, resampling_grid.sitk.GetPixelIDValue())
stack_resampled_sitk_mask = sitk.GetImageFromArray(nda.astype("uint8"))
stack_resampled_sitk_mask.CopyInformation(resampling_grid.sitk_mask)
stack_resampled_sitk_mask = sitk.Cast(
stack_resampled_sitk_mask, resampling_grid.sitk_mask.GetPixelIDValue())
# Create helper used for normalization at the end
nda_stack_covered_indices = np.zeros(nda_shape)
for i in range(0, self._N_slices):
slice = self._slices[i]
# Resample slice and its mask to stack space (volume)
stack_resampled_slice_sitk = sitk.Resample(
slice.sitk,
resampling_grid.sitk,
sitk.Euler3DTransform(),
interpolator,
default_pixel_value,
resampling_grid.sitk.GetPixelIDValue())
stack_resampled_slice_sitk_mask = sitk.Resample(
slice.sitk_mask,
resampling_grid.sitk_mask,
sitk.Euler3DTransform(),
sitk.sitkNearestNeighbor,
0,
resampling_grid.sitk_mask.GetPixelIDValue())
# Add resampled slice and mask to stack space
stack_resampled_sitk += stack_resampled_slice_sitk
stack_resampled_sitk_mask += stack_resampled_slice_sitk_mask
# Get indices which are updated in stack space
nda_stack_resampled_slice_ind = sitk.GetArrayFromImage(
stack_resampled_slice_sitk)
ind = np.nonzero(nda_stack_resampled_slice_ind)
# Increment counter for respective updated voxels
nda_stack_covered_indices[ind] += 1
# Set voxels with zero counter to 1 so as to have well-defined
# normalization
nda_stack_covered_indices[nda_stack_covered_indices == 0] = 1
# Normalize resampled image
stack_normalization = sitk.GetImageFromArray(nda_stack_covered_indices)
stack_normalization.CopyInformation(resampling_grid.sitk)
stack_normalization = sitk.Cast(
stack_normalization, resampling_grid.sitk.GetPixelIDValue())
stack_resampled_sitk /= stack_normalization
        # Get valid binary mask from the accumulated slice masks
        stack_resampled_sitk_mask = sitk.Cast(
            stack_resampled_sitk_mask > 0,
            resampling_grid.sitk_mask.GetPixelIDValue())
if filename is None:
filename = self._filename + "_" + interpolator_str
stack = self.from_sitk_image(
image_sitk=stack_resampled_sitk,
filename=filename,
image_sitk_mask=stack_resampled_sitk_mask,
slice_thickness=stack_resampled_sitk.GetSpacing()[-1],
)
return stack
##
# Gets the resampled stack.
# \date 2016-12-02 17:05:10+0000
#
# \param self The object
# \param resampling_grid The resampling grid as SimpleITK image
# \param interpolator The interpolator
# \param default_pixel_value The default pixel value
#
# \return The resampled stack as Stack object
#
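    # Hedged usage sketch (added for illustration; not part of the original
    # code; the target spacing of 1 mm is an arbitrary example value):
    #
    #   stack_1mm = stack.get_resampled_stack(spacing=1.0, interpolator="BSpline")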
def get_resampled_stack(self, resampling_grid=None, spacing=None, interpolator="Linear", default_pixel_value=0.0, filename=None):
if (resampling_grid is None and spacing is None) or \
(resampling_grid is not None and spacing is not None):
raise IOError(
"Either 'resampling_grid' or 'spacing' must be specified")
# Get SimpleITK-interpolator
try:
interpolator_str = interpolator
interpolator = eval("sitk.sitk" + interpolator_str)
except:
raise ValueError(
"Error: interpolator is not known. "
"Must fit sitk.InterpolatorEnum format. "
"Possible examples include "
"'NearestNeighbor', 'Linear', or 'BSpline'.")
if resampling_grid is not None:
resampled_stack_sitk = sitk.Resample(
self.sitk,
resampling_grid,
sitk.Euler3DTransform(),
interpolator,
default_pixel_value,
self.sitk.GetPixelIDValue())
resampled_stack_sitk_mask = sitk.Resample(
self.sitk_mask,
resampling_grid,
sitk.Euler3DTransform(),
sitk.sitkNearestNeighbor,
0,
self.sitk_mask.GetPixelIDValue())
else:
resampler = simplereg.resampler.Resampler
resampled_stack_sitk = resampler.get_resampled_image_sitk(
image_sitk=self.sitk,
spacing=spacing,
interpolator=interpolator,
padding=default_pixel_value,
add_to_grid_unit="mm",
)
resampled_stack_sitk_mask = resampler.get_resampled_image_sitk(
image_sitk=self.sitk_mask,
spacing=spacing,
interpolator=sitk.sitkNearestNeighbor,
padding=0,
add_to_grid_unit="mm",
)
# Create Stack instance
if filename is None:
filename = self._filename + "_" + interpolator_str
stack = self.from_sitk_image(
image_sitk=resampled_stack_sitk,
slice_thickness=resampled_stack_sitk.GetSpacing()[-1],
filename=filename,
image_sitk_mask=resampled_stack_sitk_mask,
)
return stack
##
    # Gets the stack multiplied with its mask. The rationale is to obtain
    # "cleaner" looking HR images after the SRR step, where motion correction
    # might have dispersed some slices.
# \date 2017-05-26 13:50:39+0100
#
# \param self The object
# \param filename The filename
#
# \return The stack multiplied with its mask.
#
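    # Hedged usage sketch (added for illustration; not part of the original
    # code):
    #
    #   masked_stack = stack.get_stack_multiplied_with_mask()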
def get_stack_multiplied_with_mask(self, filename=None, mask_sitk=None):
if mask_sitk is None:
mask_sitk = self.sitk_mask
# Multiply stack with its mask
image_sitk = self.sitk * \
sitk.Cast(mask_sitk, self.sitk.GetPixelIDValue())
if filename is None:
filename = self.get_filename()
return Stack.from_sitk_image(
image_sitk=image_sitk,
filename=filename,
image_sitk_mask=mask_sitk,
slice_thickness=self.get_slice_thickness(),
)
# Get stack resampled on isotropic grid based on the actual position of
# its slices
# \param[in] resolution length of voxel side, scalar
# \return isotropically, resampled stack as Stack object
def get_isotropically_resampled_stack_from_slices(self, resolution=None, interpolator="NearestNeighbor", default_pixel_value=0.0, filename=None):
resampled_stack = self.get_resampled_stack_from_slices()
# Choose interpolator
try:
interpolator_str = interpolator
interpolator = eval("sitk.sitk" + interpolator_str)
except:
raise ValueError("Error: interpolator is not known")
# Read original spacing (voxel dimension) and size of target stack:
spacing = np.array(resampled_stack.sitk.GetSpacing())
size = np.array(resampled_stack.sitk.GetSize()).astype("int")
if resolution is None:
size_new = size
spacing_new = spacing
# Update information according to isotropic resolution
size_new[2] = np.round(
spacing[2] / spacing[0] * size[2]).astype("int")
spacing_new[2] = spacing[0]
else:
spacing_new = np.ones(3) * resolution
size_new = np.round(spacing / spacing_new * size).astype("int")
# For Python3: sitk.Resample in Python3 does not like np.int types!
size_new = [int(i) for i in size_new]
# Resample image and its mask to isotropic grid
isotropic_resampled_stack_sitk = sitk.Resample(
resampled_stack.sitk,
size_new,
sitk.Euler3DTransform(),
interpolator,
resampled_stack.sitk.GetOrigin(),
spacing_new,
resampled_stack.sitk.GetDirection(),
default_pixel_value,
resampled_stack.sitk.GetPixelIDValue())
isotropic_resampled_stack_sitk_mask = sitk.Resample(
resampled_stack.sitk_mask,
size_new,
sitk.Euler3DTransform(),
sitk.sitkNearestNeighbor,
resampled_stack.sitk.GetOrigin(),
spacing_new,
resampled_stack.sitk.GetDirection(),
0,
resampled_stack.sitk_mask.GetPixelIDValue())
# Create Stack instance
if filename is None:
filename = self._filename + "_" + interpolator_str + "Iso"
        stack = self.from_sitk_image(
            image_sitk=isotropic_resampled_stack_sitk,
            slice_thickness=isotropic_resampled_stack_sitk.GetSpacing()[-1],
            filename=filename,
            image_sitk_mask=isotropic_resampled_stack_sitk_mask,
        )
return stack
##
# Gets the isotropically resampled stack.
# \date 2017-02-03 16:34:24+0000
#
# \param self The object
# \param resolution length of voxel side, scalar
# \param interpolator choose type of interpolator for
# resampling
# \param extra_frame additional extra frame of zero
# intensities surrounding the stack in mm
# \param filename Filename of resampled stack
# \param mask_dilation_radius The mask dilation radius
# \param mask_dilation_kernel The kernel in "Ball", "Box", "Annulus"
# or "Cross"
#
# \return The isotropically resampled stack.
#
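    # Hedged usage sketch (added for illustration; not part of the original
    # code; resolution, extra frame and dilation radius are arbitrary example
    # values):
    #
    #   stack_iso = stack.get_isotropically_resampled_stack(
    #       resolution=0.8,
    #       extra_frame=10,
    #       mask_dilation_radius=2,
    #   )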
def get_isotropically_resampled_stack(self, resolution=None, interpolator="Linear", extra_frame=0, filename=None, mask_dilation_radius=0, mask_dilation_kernel="Ball"):
# Choose interpolator
try:
interpolator_str = interpolator
interpolator = eval("sitk.sitk" + interpolator_str)
except:
raise ValueError("Error: interpolator is not known")
if resolution is None:
spacing = self.sitk.GetSpacing()[0]
else:
spacing = resolution
# Resample image and its mask to isotropic grid
resampler = simplereg.resampler.Resampler
isotropic_resampled_stack_sitk = resampler.get_resampled_image_sitk(
image_sitk=self.sitk,
spacing=spacing,
interpolator=interpolator,
padding=0.0,
add_to_grid=extra_frame,
add_to_grid_unit="mm",
)
isotropic_resampled_stack_sitk_mask = resampler.get_resampled_image_sitk(
image_sitk=self.sitk_mask,
spacing=spacing,
interpolator=sitk.sitkNearestNeighbor,
padding=0,
add_to_grid=extra_frame,
add_to_grid_unit="mm",
)
if mask_dilation_radius > 0:
dilater = sitk.BinaryDilateImageFilter()
dilater.SetKernelType(eval("sitk.sitk" + mask_dilation_kernel))
dilater.SetKernelRadius(mask_dilation_radius)
isotropic_resampled_stack_sitk_mask = dilater.Execute(
isotropic_resampled_stack_sitk_mask)
# Create Stack instance
if filename is None:
filename = self._filename + "_" + interpolator_str + "Iso"
stack = self.from_sitk_image(
image_sitk=isotropic_resampled_stack_sitk,
filename=filename,
slice_thickness=isotropic_resampled_stack_sitk.GetSpacing()[-1],
image_sitk_mask=isotropic_resampled_stack_sitk_mask,
)
return stack
# Increase stack by adding zero voxels in respective directions
# \remark Used for MS project to add empty slices on top of (chopped) brain
    # \param[in] extra_slices_z number of zero-valued slices appended in z-direction, integer
    # \return enlarged stack as Stack object
def get_increased_stack(self, extra_slices_z=0):
interpolator = sitk.sitkNearestNeighbor
# Read original spacing (voxel dimension) and size of target stack:
spacing = np.array(self.sitk.GetSpacing())
size = np.array(self.sitk.GetSize()).astype("int")
origin = np.array(self.sitk.GetOrigin())
direction = self.sitk.GetDirection()
# Update information according to isotropic resolution
size[2] += extra_slices_z
# Resample image and its mask to isotropic grid
default_pixel_value = 0.0
isotropic_resampled_stack_sitk = sitk.Resample(
self.sitk,
size,
sitk.Euler3DTransform(),
interpolator,
origin,
spacing,
direction,
default_pixel_value,
self.sitk.GetPixelIDValue())
isotropic_resampled_stack_sitk_mask = sitk.Resample(
self.sitk_mask,
size,
sitk.Euler3DTransform(),
sitk.sitkNearestNeighbor,
origin,
spacing,
direction,
0,
self.sitk_mask.GetPixelIDValue())
# Create Stack instance
        stack = self.from_sitk_image(
            image_sitk=isotropic_resampled_stack_sitk,
            slice_thickness=self.get_slice_thickness(),
            filename="zincreased_" + self._filename,
            image_sitk_mask=isotropic_resampled_stack_sitk_mask,
        )
return stack
def get_cropped_stack_based_on_mask(self, boundary_i=0, boundary_j=0, boundary_k=0, unit="mm"):
# Get rectangular region surrounding the masked voxels
[x_range, y_range, z_range] = self._get_rectangular_masked_region(
self.sitk_mask)
        if any(r is None for r in (x_range, y_range, z_range)):
raise RuntimeError(
"Cropping to bounding box of mask led to an empty image. "
"Check the image stack to see whether the region of interest "
"is presented in '%s'." % self._filename)
if unit == "mm":
spacing = self.sitk.GetSpacing()
boundary_i = np.round(boundary_i / float(spacing[0]))
boundary_j = np.round(boundary_j / float(spacing[1]))
boundary_k = np.round(boundary_k / float(spacing[2]))
shape = self.sitk.GetSize()
x_range[0] = np.max([0, x_range[0] - boundary_i])
x_range[1] = np.min([shape[0], x_range[1] + boundary_i])
y_range[0] = np.max([0, y_range[0] - boundary_j])
y_range[1] = np.min([shape[1], y_range[1] + boundary_j])
z_range[0] = np.max([0, z_range[0] - boundary_k])
z_range[1] = np.min([shape[2], z_range[1] + boundary_k])
# Crop to image region defined by rectangular mask
image_crop_sitk = self._crop_image_to_region(
self.sitk, x_range, y_range, z_range)
mask_crop_sitk = self._crop_image_to_region(
self.sitk_mask, x_range, y_range, z_range)
        slice_numbers = range(int(z_range[0]), int(z_range[1]))
stack = self.from_sitk_image(
image_sitk=image_crop_sitk,
slice_thickness=self.get_slice_thickness(),
filename=self._filename,
image_sitk_mask=mask_crop_sitk,
slice_numbers=slice_numbers)
return stack
# Return rectangular region surrounding masked region.
# \param[in] mask_sitk sitk.Image representing the mask
# \return range_x pair defining x interval of mask in voxel space
# \return range_y pair defining y interval of mask in voxel space
# \return range_z pair defining z interval of mask in voxel space
def _get_rectangular_masked_region(self, mask_sitk):
spacing = np.array(mask_sitk.GetSpacing())
# Get mask array
nda = sitk.GetArrayFromImage(mask_sitk)
# Return in case no masked pixel available
if np.sum(abs(nda)) == 0:
return None, None, None
# Get shape defining the dimension in each direction
shape = nda.shape
# Compute sum of pixels of each slice along specified directions
sum_xy = np.sum(nda, axis=(0, 1)) # sum within x-y-plane
sum_xz = np.sum(nda, axis=(0, 2)) # sum within x-z-plane
sum_yz = np.sum(nda, axis=(1, 2)) # sum within y-z-plane
# Find masked regions (non-zero sum!)
        range_x = np.zeros(2, dtype=int)
        range_y = np.zeros(2, dtype=int)
        range_z = np.zeros(2, dtype=int)
# Non-zero elements of numpy array nda defining x_range
ran = np.nonzero(sum_yz)[0]
range_x[0] = np.max([0, ran[0]])
range_x[1] = np.min([shape[0], ran[-1] + 1])
# Non-zero elements of numpy array nda defining y_range
        ran = np.nonzero(sum_xz)[0]
# Copyright 2018 GPflow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Kernels form a core component of GPflow models and allow prior information to
be encoded about a latent function of interest. The effect of choosing
different kernels, and how it is possible to combine multiple kernels is shown
in the `"Using kernels in GPflow" notebook <notebooks/kernels.html>`_.
"""
import abc
from functools import partial, reduce
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from ..base import Module
class Kernel(Module, metaclass=abc.ABCMeta):
"""
The basic kernel class. Handles active dims.
"""
def __init__(self,
active_dims: Optional[Union[slice, list]] = None,
name: Optional[str] = None):
"""
:param active_dims: active dimensions, has the slice type.
:param name: optional kernel name.
"""
super().__init__(name=name)
if isinstance(active_dims, list):
            active_dims = np.array(active_dims)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import math
import os
import pickle
import unittest
from copy import deepcopy
import warnings
import numpy as np
import numpy.ma as ma
from obspy import Stream, Trace, UTCDateTime, __version__, read, read_inventory
from obspy.core import Stats
from obspy.core.compatibility import mock
from obspy.core.util.testing import ImageComparison
from obspy.io.xseed import Parser
class TraceTestCase(unittest.TestCase):
"""
Test suite for obspy.core.trace.Trace.
"""
@staticmethod
def __remove_processing(tr):
"""
Removes all processing information in the trace object.
Useful for testing.
"""
if "processing" not in tr.stats:
return
del tr.stats.processing
def test_init(self):
"""
Tests the __init__ method of the Trace class.
"""
# NumPy ndarray
tr = Trace(data=np.arange(4))
self.assertEqual(len(tr), 4)
# NumPy masked array
data = np.ma.array([0, 1, 2, 3], mask=[True, True, False, False])
tr = Trace(data=data)
self.assertEqual(len(tr), 4)
# other data types will raise
self.assertRaises(ValueError, Trace, data=[0, 1, 2, 3])
self.assertRaises(ValueError, Trace, data=(0, 1, 2, 3))
self.assertRaises(ValueError, Trace, data='1234')
def test_setattr(self):
"""
Tests the __setattr__ method of the Trace class.
"""
# NumPy ndarray
tr = Trace()
tr.data = np.arange(4)
self.assertEqual(len(tr), 4)
# NumPy masked array
tr = Trace()
tr.data = np.ma.array([0, 1, 2, 3], mask=[True, True, False, False])
self.assertEqual(len(tr), 4)
# other data types will raise
tr = Trace()
self.assertRaises(ValueError, tr.__setattr__, 'data', [0, 1, 2, 3])
self.assertRaises(ValueError, tr.__setattr__, 'data', (0, 1, 2, 3))
self.assertRaises(ValueError, tr.__setattr__, 'data', '1234')
def test_len(self):
"""
Tests the __len__ and count methods of the Trace class.
"""
trace = Trace(data=np.arange(1000))
self.assertEqual(len(trace), 1000)
self.assertEqual(trace.count(), 1000)
def test_mul(self):
"""
Tests the __mul__ method of the Trace class.
"""
tr = Trace(data=np.arange(10))
st = tr * 5
self.assertEqual(len(st), 5)
# you may only multiply using an integer
self.assertRaises(TypeError, tr.__mul__, 2.5)
self.assertRaises(TypeError, tr.__mul__, '1234')
def test_div(self):
"""
Tests the __div__ method of the Trace class.
"""
tr = Trace(data=np.arange(1000))
st = tr / 5
self.assertEqual(len(st), 5)
self.assertEqual(len(st[0]), 200)
        # you may only divide using an integer
self.assertRaises(TypeError, tr.__div__, 2.5)
self.assertRaises(TypeError, tr.__div__, '1234')
def test_ltrim(self):
"""
Tests the ltrim method of the Trace class.
"""
# set up
trace = Trace(data=np.arange(1000))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 200.0
end = UTCDateTime(2000, 1, 1, 0, 0, 4, 995000)
# verify
trace.verify()
# UTCDateTime/int/float required
self.assertRaises(TypeError, trace._ltrim, '1234')
self.assertRaises(TypeError, trace._ltrim, [1, 2, 3, 4])
# ltrim 100 samples
tr = deepcopy(trace)
tr._ltrim(0.5)
tr.verify()
np.testing.assert_array_equal(tr.data[0:5],
np.array([100, 101, 102, 103, 104]))
self.assertEqual(len(tr.data), 900)
self.assertEqual(tr.stats.npts, 900)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start + 0.5)
self.assertEqual(tr.stats.endtime, end)
# ltrim 202 samples
tr = deepcopy(trace)
tr._ltrim(1.010)
tr.verify()
np.testing.assert_array_equal(tr.data[0:5],
np.array([202, 203, 204, 205, 206]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start + 1.010)
self.assertEqual(tr.stats.endtime, end)
# ltrim to UTCDateTime
tr = deepcopy(trace)
tr._ltrim(UTCDateTime(2000, 1, 1, 0, 0, 1, 10000))
tr.verify()
np.testing.assert_array_equal(tr.data[0:5],
np.array([202, 203, 204, 205, 206]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start + 1.010)
self.assertEqual(tr.stats.endtime, end)
# some sanity checks
# negative start time as datetime
tr = deepcopy(trace)
tr._ltrim(start - 1, pad=True)
tr.verify()
self.assertEqual(tr.stats.starttime, start - 1)
np.testing.assert_array_equal(trace.data, tr.data[200:])
self.assertEqual(tr.stats.endtime, trace.stats.endtime)
# negative start time as integer
tr = deepcopy(trace)
tr._ltrim(-100, pad=True)
tr.verify()
self.assertEqual(tr.stats.starttime, start - 100)
delta = 100 * trace.stats.sampling_rate
np.testing.assert_array_equal(trace.data, tr.data[int(delta):])
self.assertEqual(tr.stats.endtime, trace.stats.endtime)
# start time > end time
tr = deepcopy(trace)
tr._ltrim(trace.stats.endtime + 100)
tr.verify()
self.assertEqual(tr.stats.starttime,
trace.stats.endtime + 100)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# start time == end time
tr = deepcopy(trace)
tr._ltrim(5)
tr.verify()
self.assertEqual(tr.stats.starttime,
trace.stats.starttime + 5)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# start time == end time
tr = deepcopy(trace)
tr._ltrim(5.1)
tr.verify()
self.assertEqual(tr.stats.starttime,
trace.stats.starttime + 5.1)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
def test_rtrim(self):
"""
Tests the rtrim method of the Trace class.
"""
# set up
trace = Trace(data=np.arange(1000))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 200.0
end = UTCDateTime(2000, 1, 1, 0, 0, 4, 995000)
trace.verify()
# UTCDateTime/int/float required
self.assertRaises(TypeError, trace._rtrim, '1234')
self.assertRaises(TypeError, trace._rtrim, [1, 2, 3, 4])
# rtrim 100 samples
tr = deepcopy(trace)
tr._rtrim(0.5)
tr.verify()
np.testing.assert_array_equal(tr.data[-5:],
np.array([895, 896, 897, 898, 899]))
self.assertEqual(len(tr.data), 900)
self.assertEqual(tr.stats.npts, 900)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, end - 0.5)
# rtrim 202 samples
tr = deepcopy(trace)
tr._rtrim(1.010)
tr.verify()
np.testing.assert_array_equal(tr.data[-5:],
np.array([793, 794, 795, 796, 797]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, end - 1.010)
        # rtrim 202 samples via UTCDateTime
tr = deepcopy(trace)
tr._rtrim(UTCDateTime(2000, 1, 1, 0, 0, 3, 985000))
tr.verify()
np.testing.assert_array_equal(tr.data[-5:],
np.array([793, 794, 795, 796, 797]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, end - 1.010)
# some sanity checks
# negative end time
tr = deepcopy(trace)
t = UTCDateTime(1999, 12, 31)
tr._rtrim(t)
tr.verify()
self.assertEqual(tr.stats.endtime, t)
np.testing.assert_array_equal(tr.data, np.empty(0))
# negative end time with given seconds
tr = deepcopy(trace)
tr._rtrim(100)
tr.verify()
self.assertEqual(tr.stats.endtime, trace.stats.endtime - 100)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# end time > start time
tr = deepcopy(trace)
t = UTCDateTime(2001)
tr._rtrim(t)
tr.verify()
self.assertEqual(tr.stats.endtime, t)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# end time > start time given seconds
tr = deepcopy(trace)
tr._rtrim(5.1)
tr.verify()
delta = int(math.floor(round(5.1 * trace.stats.sampling_rate, 7)))
endtime = trace.stats.starttime + trace.stats.delta * \
(trace.stats.npts - delta - 1)
self.assertEqual(tr.stats.endtime, endtime)
np.testing.assert_array_equal(tr.data, np.empty(0))
# end time == start time
# returns one sample!
tr = deepcopy(trace)
tr._rtrim(4.995)
tr.verify()
np.testing.assert_array_equal(tr.data, np.array([0]))
self.assertEqual(len(tr.data), 1)
self.assertEqual(tr.stats.npts, 1)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, start)
def test_rtrim_with_padding(self):
"""
Tests the _rtrim() method of the Trace class with padding. It has
already been tested in the two sided trimming tests. This is just to
have an explicit test. Also tests issue #429.
"""
# set up
trace = Trace(data=np.arange(10))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 1.0
trace.verify()
# Pad with no fill_value will mask the additional values.
tr = trace.copy()
end = tr.stats.endtime
tr._rtrim(end + 10, pad=True)
self.assertEqual(tr.stats.endtime, trace.stats.endtime + 10)
np.testing.assert_array_equal(tr.data[0:10], np.arange(10))
# Check that the first couple of entries are not masked.
self.assertFalse(tr.data[0:10].mask.any())
# All the other entries should be masked.
self.assertTrue(tr.data[10:].mask.all())
# Pad with fill_value.
tr = trace.copy()
end = tr.stats.endtime
tr._rtrim(end + 10, pad=True, fill_value=-33)
self.assertEqual(tr.stats.endtime, trace.stats.endtime + 10)
# The first ten entries should not have changed.
np.testing.assert_array_equal(tr.data[0:10], np.arange(10))
# The rest should be filled with the fill_value.
np.testing.assert_array_equal(tr.data[10:], np.ones(10) * -33)
def test_trim(self):
"""
Tests the trim method of the Trace class.
"""
# set up
trace = Trace(data=np.arange(1001))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 200.0
end = UTCDateTime(2000, 1, 1, 0, 0, 5, 0)
trace.verify()
# rtrim 100 samples
trace.trim(0.5, 0.5)
trace.verify()
np.testing.assert_array_equal(trace.data[-5:],
np.array([896, 897, 898, 899, 900]))
np.testing.assert_array_equal(trace.data[:5],
np.array([100, 101, 102, 103, 104]))
self.assertEqual(len(trace.data), 801)
self.assertEqual(trace.stats.npts, 801)
self.assertEqual(trace.stats.sampling_rate, 200.0)
self.assertEqual(trace.stats.starttime, start + 0.5)
self.assertEqual(trace.stats.endtime, end - 0.5)
# start time should be before end time
self.assertRaises(ValueError, trace.trim, end, start)
def test_trim_all_does_not_change_dtype(self):
"""
If a Trace is completely trimmed, e.g. no data samples are remaining,
the dtype should remain unchanged.
A trace with no data samples is not really senseful but the dtype
should not be changed anyways.
"""
# Choose non native dtype.
tr = Trace(np.arange(100, dtype=np.int16))
tr.trim(UTCDateTime(10000), UTCDateTime(20000))
# Assert the result.
self.assertEqual(len(tr.data), 0)
self.assertEqual(tr.data.dtype, np.int16)
def test_add_trace_with_gap(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(0, 1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 10
# verify
tr1.verify()
tr2.verify()
# add
trace = tr1 + tr2
# stats
self.assertEqual(trace.stats.starttime, start)
self.assertEqual(trace.stats.endtime, start + 14.995)
self.assertEqual(trace.stats.sampling_rate, 200)
self.assertEqual(trace.stats.npts, 3000)
# data
self.assertEqual(len(trace), 3000)
self.assertEqual(trace[0], 0)
self.assertEqual(trace[999], 999)
self.assertTrue(ma.is_masked(trace[1000]))
self.assertTrue(ma.is_masked(trace[1999]))
self.assertEqual(trace[2000], 999)
self.assertEqual(trace[2999], 0)
# verify
trace.verify()
def test_add_trace_with_overlap(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(0, 1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 4
# add
trace = tr1 + tr2
# stats
self.assertEqual(trace.stats.starttime, start)
self.assertEqual(trace.stats.endtime, start + 8.995)
self.assertEqual(trace.stats.sampling_rate, 200)
self.assertEqual(trace.stats.npts, 1800)
# data
self.assertEqual(len(trace), 1800)
self.assertEqual(trace[0], 0)
self.assertEqual(trace[799], 799)
self.assertTrue(trace[800].mask)
self.assertTrue(trace[999].mask)
self.assertEqual(trace[1000], 799)
self.assertEqual(trace[1799], 0)
# verify
trace.verify()
def test_add_same_trace(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1001))
# add
trace = tr1 + tr1
        # should return exactly the same values
self.assertEqual(trace.stats, tr1.stats)
np.testing.assert_array_equal(trace.data, tr1.data)
# verify
trace.verify()
def test_add_trace_within_trace(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1001))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(201))
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 1
# add
trace = tr1 + tr2
        # should return exactly the same values as trace 1
self.assertEqual(trace.stats, tr1.stats)
mask = np.zeros(len(tr1)).astype(np.bool_)
mask[200:401] = True
np.testing.assert_array_equal(trace.data.mask, mask)
np.testing.assert_array_equal(trace.data.data[:200], tr1.data[:200])
np.testing.assert_array_equal(trace.data.data[401:], tr1.data[401:])
# add the other way around
trace = tr2 + tr1
        # should return exactly the same values as trace 1
self.assertEqual(trace.stats, tr1.stats)
np.testing.assert_array_equal(trace.data.mask, mask)
np.testing.assert_array_equal(trace.data.data[:200], tr1.data[:200])
np.testing.assert_array_equal(trace.data.data[401:], tr1.data[401:])
# verify
trace.verify()
def test_add_gap_and_overlap(self):
"""
Test order of merging traces.
"""
# set up
tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 4
tr3 = Trace(data=np.arange(1000)[::-1])
tr3.stats.sampling_rate = 200
tr3.stats.starttime = start + 12
# overlap
overlap = tr1 + tr2
self.assertEqual(len(overlap), 1800)
mask = np.zeros(1800).astype(np.bool_)
mask[800:1000] = True
np.testing.assert_array_equal(overlap.data.mask, mask)
np.testing.assert_array_equal(overlap.data.data[:800], tr1.data[:800])
np.testing.assert_array_equal(overlap.data.data[1000:], tr2.data[200:])
# overlap + gap
overlap_gap = overlap + tr3
self.assertEqual(len(overlap_gap), 3400)
mask = np.zeros(3400).astype(np.bool_)
mask[800:1000] = True
mask[1800:2400] = True
np.testing.assert_array_equal(overlap_gap.data.mask, mask)
np.testing.assert_array_equal(overlap_gap.data.data[:800],
tr1.data[:800])
np.testing.assert_array_equal(overlap_gap.data.data[1000:1800],
tr2.data[200:])
np.testing.assert_array_equal(overlap_gap.data.data[2400:], tr3.data)
# gap
gap = tr2 + tr3
self.assertEqual(len(gap), 2600)
mask = np.zeros(2600).astype(np.bool_)
mask[1000:1600] = True
np.testing.assert_array_equal(gap.data.mask, mask)
np.testing.assert_array_equal(gap.data.data[:1000], tr2.data)
np.testing.assert_array_equal(gap.data.data[1600:], tr3.data)
def test_add_into_gap(self):
"""
Test __add__ method of the Trace class
        Adding a trace that fits perfectly into a gap in a trace
"""
my_array = np.arange(6, dtype=np.int32)
stats = Stats()
stats.network = 'VI'
stats['starttime'] = UTCDateTime(2009, 8, 5, 0, 0, 0)
stats['npts'] = 0
stats['station'] = 'IKJA'
stats['channel'] = 'EHZ'
stats['sampling_rate'] = 1
bigtrace = Trace(data=np.array([], dtype=np.int32), header=stats)
bigtrace_sort = bigtrace.copy()
stats['npts'] = len(my_array)
my_trace = Trace(data=my_array, header=stats)
stats['npts'] = 2
trace1 = Trace(data=my_array[0:2].copy(), header=stats)
stats['starttime'] = UTCDateTime(2009, 8, 5, 0, 0, 2)
trace2 = Trace(data=my_array[2:4].copy(), header=stats)
stats['starttime'] = UTCDateTime(2009, 8, 5, 0, 0, 4)
trace3 = Trace(data=my_array[4:6].copy(), header=stats)
tr1 = bigtrace
tr2 = bigtrace_sort
for method in [0, 1]:
# Random
bigtrace = tr1.copy()
bigtrace = bigtrace.__add__(trace1, method=method)
bigtrace = bigtrace.__add__(trace3, method=method)
bigtrace = bigtrace.__add__(trace2, method=method)
# Sorted
bigtrace_sort = tr2.copy()
bigtrace_sort = bigtrace_sort.__add__(trace1, method=method)
bigtrace_sort = bigtrace_sort.__add__(trace2, method=method)
bigtrace_sort = bigtrace_sort.__add__(trace3, method=method)
for tr in (bigtrace, bigtrace_sort):
self.assertTrue(isinstance(tr, Trace))
self.assertFalse(isinstance(tr.data, np.ma.masked_array))
self.assertTrue((bigtrace_sort.data == my_array).all())
fail_pattern = "\n\tExpected %s\n\tbut got %s"
failinfo = fail_pattern % (my_trace, bigtrace_sort)
failinfo += fail_pattern % (my_trace.data, bigtrace_sort.data)
self.assertEqual(bigtrace_sort, my_trace, failinfo)
failinfo = fail_pattern % (my_array, bigtrace.data)
self.assertTrue((bigtrace.data == my_array).all(), failinfo)
failinfo = fail_pattern % (my_trace, bigtrace)
failinfo += fail_pattern % (my_trace.data, bigtrace.data)
self.assertEqual(bigtrace, my_trace, failinfo)
for array_ in (bigtrace.data, bigtrace_sort.data):
failinfo = fail_pattern % (my_array.dtype, array_.dtype)
self.assertEqual(my_array.dtype, array_.dtype, failinfo)
def test_slice(self):
"""
Tests the slicing of trace objects.
"""
tr = Trace(data=np.arange(10, dtype=np.int32))
mempos = tr.data.ctypes.data
t = tr.stats.starttime
tr1 = tr.slice(t + 2, t + 8)
tr1.data[0] = 10
self.assertEqual(tr.data[2], 10)
self.assertEqual(tr.data.ctypes.data, mempos)
self.assertEqual(tr.data[2:9].ctypes.data, tr1.data.ctypes.data)
self.assertEqual(tr1.data.ctypes.data - 8, mempos)
# Test the processing information for the slicing. The sliced trace
# should have a processing information showing that it has been
# trimmed. The original trace should have nothing.
tr = Trace(data=np.arange(10, dtype=np.int32))
tr2 = tr.slice(tr.stats.starttime)
self.assertNotIn("processing", tr.stats)
self.assertIn("processing", tr2.stats)
self.assertIn("trim", tr2.stats.processing[0])
def test_slice_no_starttime_or_endtime(self):
"""
Tests the slicing of trace objects with no start time or end time
provided. Compares results against the equivalent trim() operation
"""
tr_orig = Trace(data=np.arange(10, dtype=np.int32))
tr = tr_orig.copy()
# two time points outside the trace and two inside
t1 = tr.stats.starttime - 2
t2 = tr.stats.starttime + 2
t3 = tr.stats.endtime - 3
t4 = tr.stats.endtime + 2
# test 1: only removing data at left side
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t2)
self.assertEqual(tr_trim, tr.slice(starttime=t2))
tr2 = tr.slice(starttime=t2, endtime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
# test 2: only removing data at right side
tr_trim = tr_orig.copy()
tr_trim.trim(endtime=t3)
self.assertEqual(tr_trim, tr.slice(endtime=t3))
tr2 = tr.slice(starttime=t1, endtime=t3)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
# test 3: not removing data at all
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t1, endtime=t4)
tr2 = tr.slice()
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(endtime=t4)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1, endtime=t4)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr_trim.trim()
tr2 = tr.slice()
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(endtime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1, endtime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
# test 4: removing data at left and right side
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t2, endtime=t3)
self.assertEqual(tr_trim, tr.slice(t2, t3))
self.assertEqual(tr_trim, tr.slice(starttime=t2, endtime=t3))
# test 5: no data left after operation
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t4)
tr2 = tr.slice(starttime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t4, endtime=t4 + 1)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
def test_slice_nearest_sample(self):
"""
Tests slicing with the nearest sample flag set to on or off.
"""
tr = Trace(data=np.arange(6))
# Samples at:
# 0 10 20 30 40 50
tr.stats.sampling_rate = 0.1
# Nearest sample flag defaults to true.
tr2 = tr.slice(UTCDateTime(4), UTCDateTime(44))
self.assertEqual(tr2.stats.starttime, UTCDateTime(0))
self.assertEqual(tr2.stats.endtime, UTCDateTime(40))
tr2 = tr.slice(UTCDateTime(8), UTCDateTime(48))
self.assertEqual(tr2.stats.starttime, UTCDateTime(10))
self.assertEqual(tr2.stats.endtime, UTCDateTime(50))
# Setting it to False changes the returned values.
tr2 = tr.slice(UTCDateTime(4), UTCDateTime(44), nearest_sample=False)
self.assertEqual(tr2.stats.starttime, UTCDateTime(10))
self.assertEqual(tr2.stats.endtime, UTCDateTime(40))
tr2 = tr.slice(UTCDateTime(8), UTCDateTime(48), nearest_sample=False)
self.assertEqual(tr2.stats.starttime, UTCDateTime(10))
self.assertEqual(tr2.stats.endtime, UTCDateTime(40))
def test_trim_floating_point(self):
"""
Tests the slicing of trace objects.
"""
# Create test array that allows for easy testing.
tr = Trace(data=np.arange(11))
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Just some sanity tests.
self.assertEqual(tr.stats.starttime, UTCDateTime(0))
self.assertEqual(tr.stats.endtime, UTCDateTime(10))
# Create temp trace object used for testing.
st = tr.stats.starttime
# This is supposed to include the start and end times and should
# therefore cut right at 2 and 8.
temp = deepcopy(tr)
temp.trim(st + 2.1, st + 7.1)
# Should be identical.
temp2 = deepcopy(tr)
temp2.trim(st + 2.0, st + 8.0)
self.assertEqual(temp.stats.starttime, UTCDateTime(2))
self.assertEqual(temp.stats.endtime, UTCDateTime(7))
self.assertEqual(temp.stats.npts, 6)
self.assertEqual(temp2.stats.npts, 7)
# self.assertEqual(temp.stats, temp2.stats)
np.testing.assert_array_equal(temp.data, temp2.data[:-1])
# Create test array that allows for easy testing.
# Check if the data is the same.
self.assertNotEqual(temp.data.ctypes.data, tr.data[2:9].ctypes.data)
np.testing.assert_array_equal(tr.data[2:8], temp.data)
# Using out of bounds times should not do anything but create
# a copy of the stats.
temp = deepcopy(tr)
temp.trim(st - 2.5, st + 200)
# The start and end times should not change.
self.assertEqual(temp.stats.starttime, UTCDateTime(0))
self.assertEqual(temp.stats.endtime, UTCDateTime(10))
self.assertEqual(temp.stats.npts, 11)
# Alter the new stats to make sure the old one stays intact.
temp.stats.starttime = UTCDateTime(1000)
self.assertEqual(org_stats, tr.stats)
# Check if the data address is not the same, that is it is a copy
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
np.testing.assert_array_equal(tr.data, temp.data)
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
# Use more complicated times and sampling rate.
tr = Trace(data=np.arange(111))
tr.stats.starttime = UTCDateTime(111.11111)
tr.stats.sampling_rate = 50.0
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Create temp trace object used for testing.
temp = deepcopy(tr)
temp.trim(UTCDateTime(111.22222), UTCDateTime(112.99999),
nearest_sample=False)
# Should again be identical. XXX NOT!
temp2 = deepcopy(tr)
temp2.trim(UTCDateTime(111.21111), UTCDateTime(113.01111),
nearest_sample=False)
np.testing.assert_array_equal(temp.data, temp2.data[1:-1])
# Check stuff.
self.assertEqual(temp.stats.starttime, UTCDateTime(111.23111))
self.assertEqual(temp.stats.endtime, UTCDateTime(112.991110))
# Check if the data is the same.
temp = deepcopy(tr)
temp.trim(UTCDateTime(0), UTCDateTime(1000 * 1000))
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
# starttime must be in conformance with sampling rate
t = UTCDateTime(111.11111)
self.assertEqual(temp.stats.starttime, t)
delta = int((tr.stats.starttime - t) * tr.stats.sampling_rate + .5)
np.testing.assert_array_equal(tr.data, temp.data[delta:delta + 111])
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
def test_trim_floating_point_with_padding_1(self):
"""
Tests the slicing of trace objects with the use of the padding option.
"""
# Create test array that allows for easy testing.
tr = Trace(data=np.arange(11))
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Just some sanity tests.
self.assertEqual(tr.stats.starttime, UTCDateTime(0))
self.assertEqual(tr.stats.endtime, UTCDateTime(10))
# Create temp trace object used for testing.
st = tr.stats.starttime
# Using out of bounds times should not do anything but create
# a copy of the stats.
temp = deepcopy(tr)
temp.trim(st - 2.5, st + 200, pad=True)
self.assertEqual(temp.stats.starttime.timestamp, -2.0)
self.assertEqual(temp.stats.endtime.timestamp, 200)
self.assertEqual(temp.stats.npts, 203)
mask = np.zeros(203).astype(np.bool_)
mask[:2] = True
mask[13:] = True
np.testing.assert_array_equal(temp.data.mask, mask)
# Alter the new stats to make sure the old one stays intact.
temp.stats.starttime = UTCDateTime(1000)
self.assertEqual(org_stats, tr.stats)
# Check if the data address is not the same, that is it is a copy
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
np.testing.assert_array_equal(tr.data, temp.data[2:13])
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
def test_trim_floating_point_with_padding_2(self):
"""
Use more complicated times and sampling rate.
"""
tr = Trace(data=np.arange(111))
tr.stats.starttime = UTCDateTime(111.11111)
tr.stats.sampling_rate = 50.0
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Create temp trace object used for testing.
temp = deepcopy(tr)
temp.trim(UTCDateTime(111.22222), UTCDateTime(112.99999),
nearest_sample=False)
# Should again be identical.#XXX not
temp2 = deepcopy(tr)
temp2.trim(UTCDateTime(111.21111), UTCDateTime(113.01111),
nearest_sample=False)
np.testing.assert_array_equal(temp.data, temp2.data[1:-1])
# Check stuff.
self.assertEqual(temp.stats.starttime, UTCDateTime(111.23111))
self.assertEqual(temp.stats.endtime, UTCDateTime(112.991110))
# Check if the data is the same.
temp = deepcopy(tr)
temp.trim(UTCDateTime(0), UTCDateTime(1000 * 1000), pad=True)
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
# starttime must be in conformance with sampling rate
t = UTCDateTime(1969, 12, 31, 23, 59, 59, 991110)
self.assertEqual(temp.stats.starttime, t)
delta = int((tr.stats.starttime - t) * tr.stats.sampling_rate + .5)
np.testing.assert_array_equal(tr.data, temp.data[delta:delta + 111])
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
def test_add_sanity(self):
"""
Test sanity checks in __add__ method of the Trace object.
"""
tr = Trace(data=np.arange(10))
# you may only add a Trace object
self.assertRaises(TypeError, tr.__add__, 1234)
self.assertRaises(TypeError, tr.__add__, '1234')
self.assertRaises(TypeError, tr.__add__, [1, 2, 3, 4])
# trace id
tr2 = Trace()
tr2.stats.station = 'TEST'
self.assertRaises(TypeError, tr.__add__, tr2)
# sample rate
tr2 = Trace()
tr2.stats.sampling_rate = 20
self.assertRaises(TypeError, tr.__add__, tr2)
# calibration factor
tr2 = Trace()
tr2.stats.calib = 20
self.assertRaises(TypeError, tr.__add__, tr2)
# data type
tr2 = Trace()
tr2.data = np.arange(10, dtype=np.float32)
self.assertRaises(TypeError, tr.__add__, tr2)
def test_add_overlaps_default_method(self):
"""
Test __add__ method of the Trace object.
"""
# 1
# overlapping trace with differing data
# Trace 1: 0000000
# Trace 2: 1111111
tr1 = Trace(data=np.zeros(7))
tr2 = Trace(data=np.ones(7))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 00000--11111
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 1, 1, 1, 1, 1])
# 2 + 1 : 00000--11111
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 1, 1, 1, 1, 1])
# 2
# overlapping trace with same data
# Trace 1: 0000000
# Trace 2: 0000000
tr1 = Trace(data=np.zeros(7))
tr2 = Trace(data=np.zeros(7))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 000000000000
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.zeros(12))
# 2 + 1 : 000000000000
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.zeros(12))
# 3
# contained trace with same data
# Trace 1: 1111111111
# Trace 2: 11
tr1 = Trace(data=np.ones(10))
tr2 = Trace(data=np.ones(2))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 1111111111
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.ones(10))
# 2 + 1 : 1111111111
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.ones(10))
# 4
# contained trace with differing data
# Trace 1: 0000000000
# Trace 2: 11
tr1 = Trace(data=np.zeros(10))
tr2 = Trace(data=np.ones(2))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 00000--000
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 0, 0, 0])
# 2 + 1 : 00000--000
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 0, 0, 0])
# 5
# completely contained trace with same data until end
# Trace 1: 1111111111
# Trace 2: 1111111111
tr1 = Trace(data=np.ones(10))
tr2 = Trace(data=np.ones(10))
# 1 + 2 : 1111111111
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.ones(10))
# 6
# completely contained trace with differing data
# Trace 1: 0000000000
# Trace 2: 1111111111
tr1 = Trace(data=np.zeros(10))
tr2 = Trace(data=np.ones(10))
# 1 + 2 : ----------
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(), [None] * 10)
def test_add_with_different_sampling_rates(self):
"""
Test __add__ method of the Trace object.
"""
# 1 - different sampling rates for the same channel should fail
tr1 = Trace(data=np.zeros(5))
tr1.stats.sampling_rate = 200
tr2 = Trace(data=np.zeros(5))
tr2.stats.sampling_rate = 50
self.assertRaises(TypeError, tr1.__add__, tr2)
self.assertRaises(TypeError, tr2.__add__, tr1)
# 2 - different sampling rates for the different channels works
tr1 = Trace(data=np.zeros(5))
tr1.stats.sampling_rate = 200
tr1.stats.channel = 'EHE'
tr2 = Trace(data=np.zeros(5))
tr2.stats.sampling_rate = 50
tr2.stats.channel = 'EHZ'
tr3 = Trace(data=np.zeros(5))
tr3.stats.sampling_rate = 200
tr3.stats.channel = 'EHE'
tr4 = Trace(data=np.zeros(5))
tr4.stats.sampling_rate = 50
tr4.stats.channel = 'EHZ'
# same sampling rate and ids should not fail
tr1 + tr3
tr3 + tr1
tr2 + tr4
tr4 + tr2
def test_add_with_different_datatypes_or_id(self):
"""
Test __add__ method of the Trace object.
"""
# 1 - different data types for the same channel should fail
tr1 = Trace(data=np.zeros(5, dtype=np.int32))
        tr2 = Trace(data=np.zeros(5, dtype=np.float32))
        self.assertRaises(TypeError, tr1.__add__, tr2)
# -*-coding: utf-8 -*-
"""
@Project: IntelligentManufacture
@File : image_processing.py
@Author : panjq
@E-mail : <EMAIL>
@Date : 2019-02-14 15:34:50
"""
import os
import copy
import re
import cv2
import numpy as np
import matplotlib.pyplot as plt
import requests
import matplotlib
import base64
import PIL.Image as Image
def get_color_map(nums=25):
colors = [
"#FF0000", "#FF7F50", "#B0171F", "#872657", "#FF00FF",
"#FFFF00", "#FF8000", "#FF9912", "#DAA569", "#FF6100",
"#0000FF", "#3D59AB", "#03A89E", "#33A1C9", "#00C78C",
"#00FF00", "#385E0F", "#00C957", "#6B8E23", "#2E8B57",
"#A020F0", "#8A2BE2", "#A066D3", "#DA70D6", "#DDA0DD"]
colors = colors * int(np.ceil(nums / len(colors)))
return colors
def get_color(id):
color = convert_color_map(COLOR_MAP[id])
return color
def set_class_set(class_set=set()):
global CLASS_SET
CLASS_SET = class_set
COLOR_MAP = get_color_map(200)
CLASS_SET = set()
cmap = plt.get_cmap('rainbow')
def get_colors(nums):
colors = [cmap(i) for i in np.linspace(0, 1, nums + 2)]
colors = [(c[2], c[1], c[0]) for c in colors]
return colors
def convert_color_map(color, colorType="BGR"):
'''
:param color:
:param colorType:
:return:
'''
assert (len(color) == 7 and color[0] == "#"), "input color error:color={}".format(color)
R = color[1:3]
G = color[3:5]
B = color[5:7]
R = int(R, 16)
G = int(G, 16)
B = int(B, 16)
if colorType == "BGR":
return (B, G, R)
elif colorType == "RGB":
return (R, G, B)
else:
        raise ValueError("colorType error: unsupported colorType '{}'".format(colorType))
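# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Shows what convert_color_map() returns for a sample hex string; "#FF8000" is
# just an example value.
def _demo_convert_color_map():
    bgr = convert_color_map("#FF8000", colorType="BGR")  # (B, G, R) order for OpenCV
    rgb = convert_color_map("#FF8000", colorType="RGB")  # (R, G, B) order for matplotlib
    assert bgr == (0, 128, 255)
    assert rgb == (255, 128, 0)
    return bgr, rgb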
def bound_protection(points, height, width):
"""
Avoid array overbounds
:param points:
:param height:
:param width:
:return:
"""
points[points[:, 0] > width, 0] = width - 1 # x
points[points[:, 1] > height, 1] = height - 1 # y
# points[points[:, 0] < 0, 0] = 0 # x
# points[points[:, 1] < 0, 1] = 0 # y
return points
def tensor2image(batch_tensor, index=0):
"""
convert tensor to image
:param batch_tensor:
:param index:
:return:
"""
image_tensor = batch_tensor[index, :]
image = np.array(image_tensor, dtype=np.float32)
image = np.squeeze(image)
    image = image.transpose(1, 2, 0)  # channel order from [c,h,w] -> [h,w,c]
return image
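# --- Illustrative usage sketch (added for clarity; not part of the original module).
# tensor2image() takes a batch shaped [N, C, H, W] and returns a single [H, W, C]
# image; the random batch below is only an example.
def _demo_tensor2image():
    batch = np.random.rand(2, 3, 8, 8).astype(np.float32)
    img = tensor2image(batch, index=1)
    assert img.shape == (8, 8, 3)
    return img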
def get_image_tensor(image_path, image_size, transpose=False):
image = read_image(image_path)
# transform = default_transform(image_size)
# torch_image = transform(image).detach().numpy()
image = resize_image(image, int(128 * image_size[0] / 112), int(128 * image_size[1] / 112))
image = center_crop(image, crop_size=image_size)
image_tensor = image_normalization(image, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
if transpose:
image_tensor = image_tensor.transpose(2, 0, 1) # NHWC->NCHW
image_tensor = image_tensor[np.newaxis, :]
# std = np.std(torch_image-image_tensor)
return image_tensor
def image_clip(image):
"""
:param image:
:return:
"""
image = np.clip(image, 0, 1)
return image
def show_batch_image(title, batch_imgs, index=0):
'''
    Display one image from a batch of images.
:param title:
:param batch_imgs:
:param index:
:return:
'''
image = batch_imgs[index, :]
# image = image.numpy() #
image = np.array(image, dtype=np.float32)
image = np.squeeze(image)
if len(image.shape) == 3:
        image = image.transpose(1, 2, 0)  # channel order from [c,h,w] -> [h,w,c]
else:
image = image.transpose(1, 0)
if title:
cv_show_image(title, image)
def show_image(title, rgb_image):
'''
    Display an RGB image with matplotlib.
    :param title: figure title
    :param rgb_image: image data
:return:
'''
# plt.figure("show_image")
# print(image.dtype)
channel = len(rgb_image.shape)
if channel == 3:
plt.imshow(rgb_image)
else:
plt.imshow(rgb_image, cmap='gray')
    plt.axis('on')  # set to 'off' to hide the axes
    plt.title(title)  # figure title
plt.show()
def cv_show_image(title, image, type='rgb', waitKey=0):
'''
    Display an RGB image with OpenCV.
    :param title: window title
    :param image: input RGB image
    :param type: 'rgb' or 'bgr'
:return:
'''
img = copy.copy(image)
channels = img.shape[-1]
if channels == 3 and type == 'rgb':
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # convert RGB to BGR for OpenCV display
if title:
cv2.imshow(title, img)
cv2.waitKey(waitKey)
def image_fliplr(image):
'''
    Flip the image horizontally (left-right).
:param image:
:return:
'''
image = np.fliplr(image)
return image
def get_prewhiten_image(x):
'''
    Prewhiten an image (zero mean, unit standard deviation).
:param x:
:return:
'''
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1 / std_adj)
return y
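# --- Illustrative usage sketch (added for clarity; not part of the original module).
# After prewhitening, the result has approximately zero mean and unit standard
# deviation; the random input below is only an example.
def _demo_get_prewhiten_image():
    x = np.random.randint(0, 256, size=(112, 112, 3)).astype(np.float64)
    y = get_prewhiten_image(x)
    assert abs(float(np.mean(y))) < 1e-6
    assert abs(float(np.std(y)) - 1.0) < 1e-6
    return y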
def image_normalization(image, mean=None, std=None):
'''
    Standardize / normalize an image:
image[channel] = (image[channel] - mean[channel]) / std[channel]
:param image: numpy image
:param mean: [0.5,0.5,0.5]
:param std: [0.5,0.5,0.5]
:return:
'''
    # note: do not simply write image = image/255 (keep the explicit float conversion below)
if isinstance(mean, list):
mean = np.asarray(mean, dtype=np.float32)
if isinstance(std, list):
std = np.asarray(std, dtype=np.float32)
image = np.array(image, dtype=np.float32)
image = image / 255.0
if mean is not None:
image = np.subtract(image, mean)
if std is not None:
image = np.multiply(image, 1 / std)
return image
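# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Typical call with per-channel mean/std; the 0.5/0.5 constants are example values,
# not values required by this project.
def _demo_image_normalization():
    img = np.full((4, 4, 3), 255, dtype=np.uint8)
    out = image_normalization(img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    # 255 -> 255/255.0 = 1.0 -> (1.0 - 0.5) / 0.5 = 1.0 on every channel
    assert np.allclose(out, 1.0)
    return out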
def data_normalization(data, ymin, ymax):
'''
    NORMALIZATION: rescale the data x to an arbitrary interval [ymin, ymax].
    :param data: input x, the numpy data to be normalized
    :param ymin: lower bound of the target interval [ymin, ymax]
    :param ymax: upper bound of the target interval [ymin, ymax]
    :return: output y, the data normalized to the interval [ymin, ymax]
'''
    xmax = np.max(data)  # maximum value
    xmin = np.min(data)  # minimum value
y = (ymax - ymin) * (data - xmin) / (xmax - xmin) + ymin
return y
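# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Rescales an arbitrary range to [-1, 1]; the input array is only an example.
def _demo_data_normalization():
    data = np.array([10.0, 20.0, 30.0])
    y = data_normalization(data, ymin=-1.0, ymax=1.0)
    assert np.allclose(y, [-1.0, 0.0, 1.0])  # min -> -1, midpoint -> 0, max -> +1
    return y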
def cv_image_normalization(image, min_val=0.0, max_val=1.0):
'''
:param image:
:param min_val:
:param max_val:
:param norm_type:
:param dtype:
:param mask:
:return:
'''
dtype = cv2.CV_32F
norm_type = cv2.NORM_MINMAX
out = np.zeros(shape=image.shape, dtype=np.float32)
cv2.normalize(image, out, alpha=min_val, beta=max_val, norm_type=norm_type, dtype=dtype)
return out
def get_prewhiten_images(images_list, normalization=False):
'''
    Prewhiten a batch of images.
:param images_list:
:param normalization:
:return:
'''
out_images = []
for image in images_list:
if normalization:
image = image_normalization(image)
image = get_prewhiten_image(image)
out_images.append(image)
return out_images
def read_image(filename, resize_height=None, resize_width=None, normalization=False, colorSpace='RGB'):
'''
    Read image data; by default returns uint8 values in [0, 255].
    :param filename:
    :param resize_height:
    :param resize_width:
    :param normalization: whether to normalize to [0., 1.0]
    :param colorSpace: output format, RGB or BGR
    :return: the image data
'''
bgr_image = cv2.imread(filename)
# bgr_image = cv2.imread(filename,cv2.IMREAD_IGNORE_ORIENTATION|cv2.IMREAD_UNCHANGED)
if bgr_image is None:
print("Warning: no image:{}".format(filename))
return None
    if len(bgr_image.shape) == 2:  # grayscale image: convert to three channels
print("Warning:gray image", filename)
bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
if colorSpace == 'RGB':
        image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
elif colorSpace == "BGR":
image = bgr_image
else:
exit(0)
# show_image(filename,image)
# image=Image.open(filename)
image = resize_image(image, resize_height, resize_width)
image = np.asanyarray(image)
if normalization:
image = image_normalization(image)
# show_image("src resize image",image)
return image
def read_image_pil(filename, resize_height=None, resize_width=None, normalization=False):
'''
    Read image data with PIL; by default returns uint8 values in [0, 255].
    :param filename:
    :param resize_height:
    :param resize_width:
    :param normalization: whether to normalize to [0., 1.0]
    :return: the image data
'''
rgb_image = Image.open(filename)
rgb_image = np.asarray(rgb_image)
if rgb_image is None:
print("Warning: no image:{}".format(filename))
return None
    if len(rgb_image.shape) == 2:  # grayscale image: convert to three channels
print("Warning:gray image", filename)
rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_GRAY2BGR)
# show_image(filename,image)
# image=Image.open(filename)
image = resize_image(rgb_image, resize_height, resize_width)
if normalization:
image = image_normalization(image)
# show_image("src resize image",image)
return image
def read_image_gbk(filename, resize_height=None, resize_width=None, normalization=False, colorSpace='RGB'):
'''
    Works around cv2.imread not handling non-ASCII (e.g. Chinese) file paths.
    Reads image data; by default returns uint8 values in [0, 255].
    :param filename:
    :param resize_height:
    :param resize_width:
    :param normalization: whether to normalize to [0., 1.0]
    :param colorSpace: output format, RGB or BGR
    :return: the image data
'''
try:
with open(filename, 'rb') as f:
data = f.read()
data = np.asarray(bytearray(data), dtype="uint8")
bgr_image = cv2.imdecode(data, cv2.IMREAD_COLOR)
except Exception as e:
bgr_image = None
    # alternatively:
# bgr_image=cv2.imdecode(np.fromfile(filename,dtype=np.uint8),cv2.IMREAD_COLOR)
if bgr_image is None:
print("Warning: no image:{}".format(filename))
return None
    if len(bgr_image.shape) == 2:  # grayscale image: convert to three channels
print("Warning:gray image", filename)
bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
if colorSpace == 'RGB':
        image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
elif colorSpace == "BGR":
image = bgr_image
else:
exit(0)
# show_image(filename,image)
# image=Image.open(filename)
image = resize_image(image, resize_height, resize_width)
image = np.asanyarray(image)
if normalization:
image = image_normalization(image)
# show_image("src resize image",image)
return image
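# --- Illustrative usage sketch (added for clarity; not part of the original module).
# read_image_gbk() decodes raw bytes with cv2.imdecode, so it also handles paths
# containing non-ASCII characters. The file name below is hypothetical.
def _demo_read_image_gbk(filename="测试图片.jpg"):
    if not os.path.exists(filename):
        return None  # nothing to demo without the example file
    return read_image_gbk(filename, resize_height=224, resize_width=224,
                          normalization=False, colorSpace='RGB')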
def requests_url(url):
'''
    Fetch a raw byte stream from a URL.
:param url:
:return:
'''
stream = None
try:
res = requests.get(url, timeout=15)
if res.status_code == 200:
stream = res.content
except Exception as e:
print(e)
return stream
def read_images_url(url, resize_height=None, resize_width=None, normalization=False, colorSpace='RGB'):
'''
    Read an image from a URL or a local file path.
:param url:
:param resize_height:
:param resize_width:
:param normalization:
:param colorSpace:
:return:
'''
if re.match(r'^https?:/{2}\w.+$', url):
stream = requests_url(url)
if stream is None:
bgr_image = None
else:
content = np.asarray(bytearray(stream), dtype="uint8")
bgr_image = cv2.imdecode(content, cv2.IMREAD_COLOR)
# pil_image = PIL.Image.open(BytesIO(stream))
# rgb_image=np.asarray(pil_image)
# bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
else:
bgr_image = cv2.imread(url)
if bgr_image is None:
print("Warning: no image:{}".format(url))
return None
    if len(bgr_image.shape) == 2:  # grayscale image: convert to three channels
print("Warning:gray image", url)
bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
if colorSpace == 'RGB':
        image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
elif colorSpace == "BGR":
image = bgr_image
else:
pass
image = resize_image(image, resize_height, resize_width)
    image = np.asanyarray(image)
    if normalization:
        image = image_normalization(image)
    return image
import unittest
import numpy as np
from scipy.spatial.transform import Rotation
from d3d.dataset.kitti import KittiObjectClass
from d3d.abstraction import Target3DArray, ObjectTarget3D, ObjectTag, TrackingTarget3D
from d3d.benchmarks import DetectionEvaluator, DetectionEvalStats, TrackingEvaluator
class TestDetectionEvaluator(unittest.TestCase):
def test_calc_stats(self):
eval_classes = [KittiObjectClass.Car, KittiObjectClass.Van]
evaluator = DetectionEvaluator(eval_classes, [0.1, 0.2])
r = Rotation.from_euler("Z", 0)
d = [2, 2, 2]
dt1 = ObjectTarget3D(
[0, 0, 0], r, d,
ObjectTag(KittiObjectClass.Car, scores=0.8)
)
dt2 = ObjectTarget3D(
[1, 1, 1], r, d,
ObjectTag(KittiObjectClass.Van, scores=0.7)
)
dt3 = ObjectTarget3D(
[-1, -1, -1], r, d,
ObjectTag(KittiObjectClass.Pedestrian, scores=0.8)
)
dt_boxes = Target3DArray([dt1, dt2, dt3], frame="test")
# test match with self
result = evaluator.calc_stats(dt_boxes, dt_boxes)
for clsobj in eval_classes:
clsid = clsobj.value
assert result.ngt[clsid] == 1
assert result.ndt[clsid][0] == 1 and result.ndt[clsid][-1] == 0
assert result.tp[clsid][0] == 1 and result.tp[clsid][-1] == 0
assert result.fp[clsid][0] == 0 and result.fp[clsid][-1] == 0
assert result.fn[clsid][0] == 0 and result.fn[clsid][-1] == 1
assert np.isclose(result.acc_iou[clsid][0], 1) and np.isnan(result.acc_iou[clsid][-1])
assert np.isclose(result.acc_angular[clsid][0], 0) and np.isnan(result.acc_angular[clsid][-1])
assert np.isclose(result.acc_dist[clsid][0], 0) and np.isnan(result.acc_dist[clsid][-1])
assert np.isclose(result.acc_box[clsid][0], 0) and np.isnan(result.acc_box[clsid][-1])
            assert np.isinf(result.acc_var[clsid][0])
from smpr3d.util import *
from smpr3d.setup import *
from smpr3d.operators import *
from numpy.fft import fftfreq, fftshift
import numpy as np
import torch as th
#f4dstem
from timeit import default_timer as time
D = 1
K = 144
MY = MX = 32
f = np.array([2, 2])
N_coherent_simulation = np.array([MY, MX]) * f
dx_smatrix_simulation = [1.0, 1.0]
dx_coherent_detector = dx_smatrix_simulation
E = 300e3
lam = wavelength(E)
C1_target = np.linspace(1000, 2000, D, dtype=np.float32)
alpha_aperture_rad = 4e-3
q_aperture = alpha_aperture_rad / lam
dev = th.device('cuda:0')
q_detector = fourier_coordinates_2D([MY, MX], dx_smatrix_simulation)
# S-matrix lateral dimensions
qy0 = fftfreq(MY, dx_smatrix_simulation[0])
qx0 = fftfreq(MX, dx_smatrix_simulation[1])
# Fourier space grid on detector
qt = th.from_numpy(q_detector).float()
C1_target_th = th.from_numpy(C1_target)
A_init = aperture(qt, lam, alpha_aperture_rad, edge=0)
probe = ZernikeProbe(qt, lam, A_init, A_requires_grad=True, fft_shifted=True, C1=C1_target_th)
Psi_target = probe()
Psi = Psi_target.detach().to(dev)
# plot(A_init)
# %%
r = th.zeros((D, K, 2)).long().to(dev)
for d in range(D):
r[d, :, 0] = 14.5
xv = fftfreq(N_coherent_simulation[0], 1 / N_coherent_simulation[0])
yv = fftfreq(N_coherent_simulation[1], 1 / N_coherent_simulation[1])
[xa, ya] = np.meshgrid(np.round(yv), np.round(xv))
q_coherent_simulation = fourier_coordinates_2D(N_coherent_simulation, dx_coherent_detector)
q2_coherent_simulation = np.linalg.norm(q_coherent_simulation, axis=0) ** 2
beam_mask_coherent_simulation = (q2_coherent_simulation < np.max(q_aperture) ** 2) * (ya % f[0] == 0) * (xa % f[1] == 0)
beam_mask_coherent_simulation = th.from_numpy(beam_mask_coherent_simulation).bool()
B = th.sum(beam_mask_coherent_simulation).item()
beam_numbers_coherent_simulation = th.ones(tuple(N_coherent_simulation)) * -1
beam_numbers_coherent_simulation[beam_mask_coherent_simulation] = th.arange(B).float()
# plot(q2_coherent_simulation, 'q2_coherent_simulation')
# plot(beam_numbers_coherent_simulation.cpu(), 'beam_numbers_coherent_simulation')
# %%
qx, qy = np.meshgrid(fftfreq(MX), fftfreq(MY))
q = np.array([qy, qx])
coords = fftshift(np.array(np.mgrid[-MY // 2:MY // 2, -MX // 2:MX // 2]))
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
    shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
    with warnings.catch_warnings():
        # casting complex values to double drops the imaginary part and raises a ComplexWarning
        warnings.filterwarnings('ignore', category=np.ComplexWarning)
        assert np.array_equal(cArrayCast, data.astype(np.double))
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
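    # weighted average over the flattened array, then along each axis; the weight
    # vector used for an axis-wise average has one element per entry of the reduced axis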
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
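    # bincount with the default minlength, an enlarged minlength, and both weighted variants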
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
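    # only the shape of the byte-swapped array is verified here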
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
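    # whole-array center of mass, then per-column (Axis.ROW) and per-row (Axis.COL) centers,
    # each compared against meas.center_of_mass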
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
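    # membership test over the whole array, then per row (Axis.COL) and per column (Axis.ROW)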
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
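    # 2-element vectors yield a scalar cross product, 3-element vectors yield a vector;
    # both cases are checked for the flattened array and along rows/columns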
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
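    # NumCpp.Slice(0, 100, 4) is intended to select the same indices (0, 4, ..., 96)
    # as the Python slice(0, 99, 4) used for the numpy comparison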
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
def test_diagonal():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
def test_divide():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    value = 0
    while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
def test_dot():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_find():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
    value2 = np.random.randn(1).item() * 100 + 1000
#Copyright 2020 <NAME>, <NAME>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import pickle
import pandas as pd
import numpy as np
import os
from pandas.api.types import is_numeric_dtype
from collections import Counter,defaultdict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import metrics
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import numpy as np
from autogluon import TabularPrediction as task
import copy
import tensorflow_data_validation as tfdv
# print(tfdv.version.__version__)
from tensorflow_data_validation.utils.schema_util import get_feature,get_categorical_features,get_categorical_numeric_features,is_categorical_feature,get_multivalent_features
rf_Filename = "RandForest.pkl"
with open(rf_Filename, 'rb') as file: Pickled_LR_Model = pickle.load(file)
del_pattern = r'([^,;\|]+[,;\|]{1}[^,;\|]+){1,}'
del_reg = re.compile(del_pattern)
delimeters = r"(,|;|\|)"
delimeters = re.compile(delimeters)
url_pat = r"(http|ftp|https):\/\/([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?"
url_reg = re.compile(url_pat)
email_pat = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b"
email_reg = re.compile(email_pat)
stop_words = set(stopwords.words('english'))
def summary_stats(dat, key_s):
b_data = []
for col in key_s:
nans = np.count_nonzero(pd.isnull(dat[col]))
dist_val = len(pd.unique(dat[col].dropna()))
Total_val = len(dat[col])
mean = 0
std_dev = 0
var = 0
min_val = 0
max_val = 0
if is_numeric_dtype(dat[col]):
mean = np.mean(dat[col])
if pd.isnull(mean):
mean = 0
std_dev = 0
#var = 0
min_val = 0
max_val = 0
else:
std_dev = np.std(dat[col])
var = np.var(dat[col])
min_val = float(np.min(dat[col]))
max_val = float(np.max(dat[col]))
b_data.append([Total_val, nans, dist_val, mean, std_dev, min_val, max_val])
return b_data
def castability_feature(dat, column_names):
castability_list = []
#make sure the value you are avaluating is not nan
for keys in column_names:
#print(keys)
i = 0
while pd.isnull(dat[keys][i]):
i += 1
if i > len(dat[keys]) - 2:
break
#if type is string try casting
if dat[keys][i].__class__.__name__ == 'str':
try:
castability = str(type(eval(dat[keys][i])))
castability_list.append(1)
except:
castability_list.append(0)
else:
castability_list.append(0)
return castability_list
def numeric_extraction(dat,column_names):
#0 no , 1 yes
numeric_extraction_list = []
#make sure the value you are avaluating is not nan
for keys in column_names:
i = 0
while pd.isnull(dat[keys][i]):
i += 1
if i > len(dat[keys]) - 2:
break
val = 0
if dat[keys][i].__class__.__name__ == 'str':
#print('yes')
#check whether any number can be extracted
try:
#it will faile when you have no numbers or if you have two numbers seperated by space
float(re.sub('[^0-9\. ]', ' ',dat[keys][i]))
#print('yes')
val = 1
except:
pass
numeric_extraction_list.append(val)
return numeric_extraction_list
def get_sample(dat, key_s):
rand = []
for name in key_s: # TODO Omg this is bad. Should use key_s.
rand_sample = list(pd.unique(dat[name]))
rand_sample = rand_sample[:5]
while len(rand_sample) < 5:
rand_sample.append(list(pd.unique(dat[name]))[np.random.randint(len(list(pd.unique(dat[name]))))])
rand.append(rand_sample[:5])
return rand
def get_avg_tokens(samples):
# samples contain list of length len(keys) of 5-sample list.
avg_tokens = []
for sample_list in samples:
list_of_num_tokens = [len(str(sample).split()) for sample in sample_list]
avg_tokens.append(sum(list_of_num_tokens) / len(list_of_num_tokens))
return avg_tokens
# summary_stat_result has a structure like [[Total_val, nans, dist_va, ...], ...].
def get_ratio_dist_val(summary_stat_result):
ratio_dist_val = []
for r in summary_stat_result:
ratio_dist_val.append(r[2]*100.0 / r[0])
return ratio_dist_val
def get_ratio_nans(summary_stat_result):
ratio_nans = []
for r in summary_stat_result:
ratio_nans.append(r[1]*100.0 / r[0])
return ratio_nans
# y = df['out/in']
def FeaturizeFile(df):
# df = pd.read_csv(CSVfile,encoding = 'latin1')
stats = []
attribute_name = []
sample = []
id_value = []
i = 0
castability = []
number_extraction = []
avg_tokens = []
ratio_dist_val = []
ratio_nans = []
keys = list(df.keys())
attribute_name.extend(keys)
summary_stat_result = summary_stats(df, keys)
stats.extend(summary_stat_result)
samples = get_sample(df,keys)
sample.extend(samples)
# castability.extend(castability_feature(df, keys))
# number_extraction.extend(numeric_extraction(df, keys))
# avg_tokens.extend(get_avg_tokens(samples))
ratio_dist_val.extend(get_ratio_dist_val(summary_stat_result))
ratio_nans.extend(get_ratio_nans(summary_stat_result))
csv_names = ['Attribute_name', 'total_vals', 'num_nans', 'num_of_dist_val', 'mean', 'std_dev', 'min_val',
'max_val', '%_dist_val', '%_nans', 'sample_1', 'sample_2', 'sample_3','sample_4','sample_5'
]
golden_data = pd.DataFrame(columns = csv_names)
for i in range(len(attribute_name)):
# print(attribute_name[i])
val_append = []
val_append.append(attribute_name[i])
val_append.extend(stats[i])
val_append.append(ratio_dist_val[i])
val_append.append(ratio_nans[i])
val_append.extend(sample[i])
# val_append.append(castability[i])
# val_append.append(number_extraction[i])
# val_append.append(avg_tokens[i])
golden_data.loc[i] = val_append
# print(golden_data)
curdf = golden_data
for row in curdf.itertuples():
# print(row[11])
is_list = False
curlst = [row[11],row[12],row[13],row[14],row[15]]
delim_cnt,url_cnt,email_cnt,date_cnt =0,0,0,0
chars_totals,word_totals,stopwords,whitespaces,delims_count = [],[],[],[],[]
for value in curlst:
word_totals.append(len(str(value).split(' ')))
chars_totals.append(len(str(value)))
whitespaces.append(str(value).count(' '))
if del_reg.match(str(value)): delim_cnt += 1
if url_reg.match(str(value)): url_cnt += 1
if email_reg.match(str(value)): email_cnt += 1
delims_count.append(len(delimeters.findall(str(value))))
tokenized = word_tokenize(str(value))
# print(tokenized)
stopwords.append(len([w for w in tokenized if w in stop_words]))
try:
_ = pd.Timestamp(value)
date_cnt += 1
except ValueError: date_cnt += 0
# print(delim_cnt,url_cnt,email_cnt)
if delim_cnt > 2: curdf.at[row.Index, 'has_delimiters'] = True
else: curdf.at[row.Index, 'has_delimiters'] = False
if url_cnt > 2: curdf.at[row.Index, 'has_url'] = True
else: curdf.at[row.Index, 'has_url'] = False
if email_cnt > 2: curdf.at[row.Index, 'has_email'] = True
else: curdf.at[row.Index, 'has_email'] = False
if date_cnt > 2: curdf.at[row.Index, 'has_date'] = True
else: curdf.at[row.Index, 'has_date'] = False
curdf.at[row.Index, 'mean_word_count'] = np.mean(word_totals)
curdf.at[row.Index, 'std_dev_word_count'] = np.std(word_totals)
curdf.at[row.Index, 'mean_stopword_total'] = np.mean(stopwords)
curdf.at[row.Index, 'stdev_stopword_total'] = np.std(stopwords)
curdf.at[row.Index, 'mean_char_count'] = np.mean(chars_totals)
curdf.at[row.Index, 'stdev_char_count'] = np.std(chars_totals)
curdf.at[row.Index, 'mean_whitespace_count'] = np.mean(whitespaces)
        curdf.at[row.Index, 'stdev_whitespace_count'] = np.std(whitespaces)
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
import math
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib import rc
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=False)
rc('mathtext', fontset='cm')
# auxiliar function for plot ticks of equal length in x and y axis despite its scales.
def convert_display_to_data_coordinates(transData, length=10):
# create a transform which will take from display to data coordinates
inv = transData.inverted()
# transform from display coordinates to data coordinates in x axis
data_coords = inv.transform([(0, 0), (length, 0)])
# get the length of the segment in data units
yticks_len = data_coords[1, 0] - data_coords[0, 0]
# transform from display coordinates to data coordinates in y axis
data_coords = inv.transform([(0, 0), (0, length)])
# get the length of the segment in data units
xticks_len = data_coords[1, 1] - data_coords[0, 1]
return xticks_len, yticks_len
#####################################
# PARAMETERS - This can be modified #
#####################################
f01 = 0.25
f02 = 0.05
N = 10
nf = 1024
#####################
# END OF PARAMETERS #
#####################
n = np.arange(N)
x1 = np.cos(2 * math.pi * f01 * n)
x2 = np.cos(2 * math.pi * f02 * n)
# approximate MLE
w, X1 = signal.freqz(x1, a=1, worN=nf, whole=False, plot=None)
w, X2 = signal.freqz(x2, a=1, worN=nf, whole=False, plot=None)
J1_app = np.square(np.absolute(X1)) * 2 / N
J2_app = np.square(np.absolute(X2)) * 2 / N
f = np.arange(0, nf) * 0.5 / nf
# exact MLE
J1 = np.zeros((nf, ))
J2 = np.zeros((nf, ))
for i in np.arange(1, nf):
    # start at f[1] because f[0] = 0 produces a non-invertible matrix
H = np.vstack((np.cos(2 * math.pi * f[i] * n), np.sin(2 * math.pi * f[i] * n))).T
A = H @ np.linalg.inv(H.T @ H) @ H.T
J1[i] = np.dot(x1, np.dot(A, x1))
J2[i] = np.dot(x2, np.dot(A, x2))
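# Note: for each trial frequency f, J(f) = x^T H (H^T H)^{-1} H^T x is the energy of the data
# projected onto the subspace spanned by cos(2*pi*f*n) and sin(2*pi*f*n); the exact MLE maximizes
# this over f, while the periodogram computed above is its large-N approximation.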
# estimators
f01_app_est = f[np.argmax(J1_app)]
f01_est = f[np.argmax(J1)]
f02_app_est = f[np.argmax(J2_app)]
f02_est = f[np.argmax(J2)]
print(f01_app_est)
print(f01_est)
print(f02_app_est)
print(f02_est)
# abscissa values
xmin = 0
xmax = 0.5
ymin = 0
ymax = 6
# axis parameters
xmin_ax = xmin
xmax_ax = xmax + 0.03
# for the plot of g(x)
ymax_ax = ymax + 1
ymin_ax = ymin - 1
# length of the ticks for all subplot (6 pixels)
display_length = 6 # in pixels
# x ticks labels margin
xtm = -0.8
ytm = 0.007
# font size
fontsize = 12
# colors from coolwarm
cNorm = colors.Normalize(vmin=0, vmax=1)
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cm.coolwarm)
col10 = scalarMap.to_rgba(0)
col20 = scalarMap.to_rgba(1)
fig = plt.figure(0, figsize=(9, 6), frameon=False)
# plot of g(x)
ax = plt.subplot2grid((8, 1), (0, 0), rowspan=4, colspan=1)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.plot(f[1:], J1[1:], color=col10, lw=2, label='$J(f)\;\mathrm{{exacta}}\;(\hat{{f_0}}={:.2f})$'.format(f01_est))
plt.plot(f, J1_app, color=col20, lw=2, label='$\mathrm{{Periodograma}}\;(\hat{{f_0}}={:.2f})$'.format(f01_app_est))
plt.plot(f[np.argmax(J1)], np.amax(J1), 'k.', markersize=9)
import cv2
import pytesseract
import numpy as np
import pandas as pd
import os
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\\tesseract.exe'
# HSV Mask to capture all Profile names only on img
def masked(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([0, 0, 0])
upper = np.array([179, 255, 145])
mask = cv2.inRange(hsv, lower, upper)
return mask
# Raw Follower Test Folder SC to Cropped SC on -> Cropped_Follwers Folder
def resize(counter,file_name):
x = counter + 1
img = cv2.imread("Raw Pics/" + file_name + "(" + str(x) + ").png", cv2.IMREAD_UNCHANGED)
#Rimg = cv2.resize(img, (500, 500))
#cropped = Rimg[80:420, 100:350]
cropped = img[200:1900, 220:755]
#cv2.imshow("PIC", img)
#cv2.imshow("resize", Rimg)
#cv2.imshow("cropped", cropped)
#cv2.waitKey(0)
cv2.imwrite("Cropped_Pics/" + str(counter) + ".png", cropped, params=None)
def img_section(img,names,database, Recorded_date, Recorder):
last = 0
for y in range(1, 10):
section = img[last: 180 * y, :]
last = 180 * y
mimg = masked(section)
#cv2.imshow("sectioned HSV", mimg)
#cv2.waitKey(0)
hImg, wImg, _ = section.shape
boxes = pytesseract.image_to_data(mimg)
print(boxes)
# create each identified word as a list
for x, b in enumerate(boxes.splitlines()):
if x != 0:
b = b.split()
#print(b)
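                # pytesseract.image_to_data returns whitespace-separated rows; only rows with
                # 12 fields carry a recognized word in the final (text) column, checked below.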
if len(b) == 12:
if b[11] not in database:
names.append(b[11])
database[len(names) - 1][4] = b[11]
database[len(names) - 1][6] = Recorded_date
database[len(names) - 1][7] = Recorder
#print(names)
break
names = []
database = np.empty([500, 8], dtype=object)
import numpy as np
from joblib import Parallel, delayed
np.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
def euclidean_distance(point1_xy, point2_xy, x_coord=0, y_coord=1, c_coord=2):
diff_x = point2_xy[x_coord] - point1_xy[x_coord]
diff_y = point2_xy[y_coord] - point1_xy[y_coord]
square_add = np.power(diff_x, 2) + np.power(diff_y, 2)
distance = np.sqrt(square_add)
return distance
def area_polygon(xy_coordinates, x_coord=0, y_coord=1, c_coord=2):
"""
Enclosed polygon area calculation using Shoelace formula
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Parameters
----------
xy_coordinates : array of shape (num_points_closed_figure, 2)
Returns
-------
area : Scalar value of area of the closed shape
"""
x_coordinates = xy_coordinates[:, x_coord]
y_coordinates = xy_coordinates[:, y_coord]
dot_product = np.dot(x_coordinates, np.roll(y_coordinates, 1))
area = 0.5*np.abs(dot_product)
return area
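# Quick sanity check of the Shoelace formula (illustrative example only, not part of the pipeline):
# the unit square [(0, 0), (1, 0), (1, 1), (0, 1)] has area 1.0:
#   area_polygon(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]))  # -> 1.0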
def trajectory_area(onesample, keypoint_idx, x_coord=0, y_coord=1, c_coord=2):
x_coordinates = onesample[:, keypoint_idx, x_coord]
y_coordinates = onesample[:, keypoint_idx, y_coord]
dot_product = np.dot(x_coordinates, np.roll(y_coordinates, 1))
area = 0.5*np.abs(dot_product)
return area
def shape_orientation_angel(point1_xy, point2_xy):
numer = point2_xy[1] - point1_xy[1]
denom = point2_xy[0] - point1_xy[0]
alpha = np.arctan2(numer, denom)
return alpha
def shape_perimeter(shape_xy_points):
perimeter = 0.0
num_points = shape_xy_points.shape[0]
for onepoint in range(num_points):
if onepoint < (num_points - 2):
point1_xy = shape_xy_points[onepoint, :]
point2_xy = shape_xy_points[onepoint+1, :]
dist = euclidean_distance(point1_xy, point2_xy)
elif onepoint == (num_points - 1):
point1_xy = shape_xy_points[0, :]
point2_xy = shape_xy_points[onepoint, :]
dist = euclidean_distance(point1_xy, point2_xy)
perimeter = dist + perimeter
return perimeter
def trajectory_perimeter(onesample, keypoint_idx, x_coord=0, y_coord=1, c_coord=2):
perimeter = 0.0
dist = 0.0
num_frames = onesample.shape[0]
for oneframe_idx in range(num_frames):
if oneframe_idx < (num_frames - 2):
point1_xy = onesample[oneframe_idx, keypoint_idx, :]
point2_xy = onesample[oneframe_idx+1, keypoint_idx, :]
dist = euclidean_distance(point1_xy, point2_xy)
elif oneframe_idx == (num_frames - 1):
point1_xy = onesample[0, keypoint_idx, :]
point2_xy = onesample[oneframe_idx, keypoint_idx, :]
dist = euclidean_distance(point1_xy, point2_xy)
perimeter = dist + perimeter
return perimeter
def shape_compactness(shape_area, shape_perimeter):
numer = 4*(np.pi)*shape_area
denom = np.power(shape_perimeter, 2)
try:
compactness = numer/denom
except FloatingPointError:
print("Exception shape_compactness")
compactness = 0.0
return compactness
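# Compactness equals 1 for a perfect circle and decreases toward 0 for elongated or irregular shapes.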
def law_of_cosines(vertexA, vertexB, vertexC):
"""
angle will be inscribed at vertexC
"""
sideA = euclidean_distance(vertexB, vertexC)
sideB = euclidean_distance(vertexA, vertexC)
sideC = euclidean_distance(vertexA, vertexB)
# length_scaling_factor = 1e6
numer = (np.power(sideA, 2) + np.power(sideB, 2) - np.power(sideC, 2))
denom = (2*sideA*sideB)
try:
angle_C = np.arccos(numer/denom)
except FloatingPointError:
print("Exception law_of_cosines")
angle_C = 0.0
return angle_C
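# Sanity check (illustrative example only): in a 3-4-5 right triangle the angle inscribed at the
# vertex joining the legs of length 3 and 4 is pi/2:
#   law_of_cosines(np.array([3, 0]), np.array([0, 4]), np.array([0, 0]))  # -> ~1.5708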
###############################################################################
###############################################################################
def facial_features_oneframe(oneframe):
"""
Features calculated using the paper https://ieeexplore.ieee.org/abstract/document/4813472
"""
mouth_area = area_polygon(oneframe[48:60, :])
mouth_height = euclidean_distance(oneframe[51, :], oneframe[57, :])
mouth_width = euclidean_distance(oneframe[48, :], oneframe[54, :])
# alpha = shape_orientation_angel(oneframe[57, :], oneframe[51, :])
# mouth_o1_orientation = np.sin(2*alpha)
# mouth_o2_orientation = np.cos(alpha)
# perimeter = shape_perimeter(oneframe[48:60, :])
# compactness = shape_compactness(mouth_area, perimeter)
# try:
# eccentricity = mouth_height/mouth_width
# except FloatingPointError:
# print("Exception facial_features_oneframe")
# eccentricity = 0.0
# return mouth_area, mouth_height, mouth_width, mouth_o1_orientation, mouth_o2_orientation, compactness, eccentricity
return mouth_area, mouth_height, mouth_width
### Main facial features from all frames for one sample
def facial_features_onesample(onesample):
face_data = onesample[:, 25:95, :]
num_frames = face_data.shape[0]
features = np.asarray(Parallel(n_jobs=-1)(delayed(facial_features_oneframe)(face_data[oneframe_idx, :, :]) for oneframe_idx in range(num_frames)))
rows, cols = features.shape[0], features.shape[1]
facial_features_onesample = np.zeros((rows, cols+6)) # change here to +6 for velocity as well
facial_features_onesample[:, :cols] = features
mouth_area_column_idx = 0
mouth_height_column_idx = 1
mouth_width_column_idx = 2
# facial_features_onesample[:, cols] = np.gradient(features[:, mouth_area_column_idx]) #velocity
# facial_features_onesample[:, cols+1] = np.gradient(features[:, mouth_height_column_idx]) #velocity
# facial_features_onesample[:, cols+2] = np.gradient(features[:, mouth_width_column_idx]) #velocity
# facial_features_onesample[:, cols+3] = np.gradient(facial_features_onesample[:, cols]) #acceleration
# facial_features_onesample[:, cols+4] = np.gradient(facial_features_onesample[:, cols+1]) #acceleration
# facial_features_onesample[:, cols+5] = np.gradient(facial_features_onesample[:, cols+2]) #acceleration
facial_features_onesample_mean = np.mean(facial_features_onesample, axis=0)
facial_features_onesample = facial_features_onesample_mean
return facial_features_onesample
###############################################################################
def body_features_oneframe(oneframe):
"""
Features calculated using the paper https://link.springer.com/chapter/10.1007/978-3-319-20801-5_59
"""
# body angles
# l_angle_shoulder_torso = law_of_cosines(oneframe[3, :], oneframe[1, :], oneframe[2, :])
# l_angle_elbow = law_of_cosines(oneframe[4, :], oneframe[2, :], oneframe[3, :])
# l_angle_wrist = law_of_cosines(oneframe[1, :], oneframe[2, :], oneframe[4, :])
# r_angle_shoulder_torso = law_of_cosines(oneframe[6, :], oneframe[1, :], oneframe[5, :])
# r_angle_elbow = law_of_cosines(oneframe[7, :], oneframe[5, :], oneframe[6, :])
# r_angle_wrist = law_of_cosines(oneframe[1, :], oneframe[5, :], oneframe[7, :])
# body euclidean distances
l_arm = euclidean_distance(oneframe[2, :], oneframe[4, :])
r_arm = euclidean_distance(oneframe[5, :], oneframe[7, :])
l_wrist_torso = euclidean_distance(oneframe[4, :], oneframe[1, :])
r_wrist_torso = euclidean_distance(oneframe[7, :], oneframe[1, :])
l_elbow_hip = euclidean_distance(oneframe[3, :], oneframe[8, :])
r_elbow_hip = euclidean_distance(oneframe[6, :], oneframe[8, :])
l_wrist_r_shoulder = euclidean_distance(oneframe[4, :], oneframe[5, :])
r_wrist_l_shoulder = euclidean_distance(oneframe[2, :], oneframe[7, :])
wrist_to_wrist = euclidean_distance(oneframe[4, :], oneframe[7, :])
elbow_to_elbow = euclidean_distance(oneframe[3, :], oneframe[6, :])
# return l_angle_shoulder_torso, l_angle_elbow, l_angle_wrist, r_angle_shoulder_torso, r_angle_elbow, r_angle_wrist, l_arm, r_arm, l_wrist_torso, r_wrist_torso, l_elbow_hip, r_elbow_hip, l_wrist_r_shoulder, r_wrist_l_shoulder, wrist_to_wrist, elbow_to_elbow
# indices as 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
return l_arm, r_arm, l_wrist_torso, r_wrist_torso, l_elbow_hip, r_elbow_hip, l_wrist_r_shoulder, r_wrist_l_shoulder, wrist_to_wrist, elbow_to_elbow
### Main BODY features from all frames for one sample
def body_features_onesample(onesample):
body_data = onesample[:, 0:25, :]
num_frames = body_data.shape[0]
features = np.asarray(Parallel(n_jobs=-1)(delayed(body_features_oneframe)(body_data[oneframe_idx, :, :]) for oneframe_idx in range(num_frames)))
rows, cols = features.shape[0], features.shape[1]
body_features_onesample = np.zeros((rows, cols+8))
body_features_onesample[:, :cols] = features
# first and second order derivative of l_angle_wrist, r_angle_wrist, l_wrist_torso, r_wrist_torso
body_features_onesample[:, cols] = np.gradient(features[:, 2]) #2 l_angle_wrist
body_features_onesample[:, cols+1] = np.gradient(features[:, 3]) #5 r_angle_wrist
body_features_onesample[:, cols+2] = np.gradient(features[:, 8])
body_features_onesample[:, cols+3] = np.gradient(features[:, 9])
# body_features_onesample[:, cols+4] = np.gradient(body_features_onesample[:, cols])
# body_features_onesample[:, cols+5] = np.gradient(body_features_onesample[:, cols+1])
# body_features_onesample[:, cols+6] = np.gradient(body_features_onesample[:, cols+2])
# body_features_onesample[:, cols+7] = np.gradient(body_features_onesample[:, cols+3])
body_features_onesample_mean = np.mean(body_features_onesample, axis=0)
body_features_onesample = body_features_onesample_mean
return body_features_onesample
###############################################################################
def one_hand_calculations(hand_data):
# angle_thumb_index = law_of_cosines(hand_data[8, :], hand_data[4, :], hand_data[2, :])
# angle_index_middle = law_of_cosines(hand_data[12, :], hand_data[8, :], hand_data[5, :])
# angle_middle_ring = law_of_cosines(hand_data[16, :], hand_data[12, :], hand_data[9, :])
# angle_ring_pinky = law_of_cosines(hand_data[20, :], hand_data[16, :], hand_data[13, :])
thumb_length = euclidean_distance(hand_data[2, :], hand_data[4, :])
index_length = euclidean_distance(hand_data[5, :], hand_data[8, :])
middle_length = euclidean_distance(hand_data[9, :], hand_data[12, :])
ring_length = euclidean_distance(hand_data[13, :], hand_data[16, :])
pinky_length = euclidean_distance(hand_data[17, :], hand_data[20, :])
finger_tips_points_xy = np.array([hand_data[0, :], hand_data[4, :], hand_data[8, :], hand_data[12, :], hand_data[16, :], hand_data[20, :]])
area = area_polygon(finger_tips_points_xy)
alpha = shape_orientation_angel(hand_data[5, :], hand_data[17, :])
hand_o1_orientation = np.sin(2*alpha)
    hand_o2_orientation = np.cos(alpha)
import numpy as np
import matplotlib.pyplot as plt
import time
import pandas as pd
from copy import deepcopy
global N
N = 8
"Defining the class chromosome with the property of length"
"In constructor function we create a list with random true and false values"
"and we set the fitness of that chromosome to -inf"
class Chromosome:
def __init__(self, length):
        self.genes = chrom_generator(length)  # genes of the given length, values drawn from [0, length)
self.cost = float('+inf')
def __len__(self):
return len(self.genes)
def reset(self):
self.cost = float('+inf')
"We initialize the first generation (make a size number of lists with the length chrome_size)"
"chrome_size is the number of queens"
def population_init(size, chrom_size): return np.array(
[Chromosome(chrom_size) for _ in range(size)])
"This function generates a chromosome with deferent genes"
def chrom_generator(n):
a = []
rand = np.random.randint(0, n)
for i in range(n):
rand = np.random.randint(0, n)
while rand in a:
            rand = np.random.randint(0, n)
#!/usr/bin/env python3
import argparse
import os
import sys
import time
import datetime
import numpy
from xopen import xopen
parser = argparse.ArgumentParser(description="Parse binary memtrace to csv (energy <> bytes)")
parser.add_argument("memtrace", help="memtrace binary (read from stdin if not provided)", default='-', nargs="?")
parser.add_argument("-c", "--stdout", help="write to standard output", action="store_true", default=False)
parser.add_argument("-o", "--output", help="write to file (disabled if stdout is used)", default=False)
parser.add_argument("-b", "--block-size", type=int, default=1000000, help="cycles read at once (default: %(default)s)")
parser.add_argument("-l", "--compress-limit", type=int, default=100000000, help="compress after that many cycles (default: %(default)s)")
parser.add_argument("-q", "--quiet", default=False, action="store_true", help="shhhhhh...")
args = parser.parse_args()
if not args.output:
args.stdout = True
if args.memtrace and args.memtrace == '-':
args.memtrace = False
if args.memtrace and not os.path.isfile(args.memtrace):
print("ERROR: binary memtrace file not found!")
parser.print_help()
sys.exit(1)
updateInterval = args.compress_limit + 1
lastTime = time.time()
sampleCount = 0
bufCycles = args.block_size
if args.memtrace:
traceFile = xopen(args.memtrace, mode='rb')
else:
traceFile = sys.stdin.buffer
if args.memtrace and not (args.memtrace.endswith('.gz') or args.memtrace.endswith('.bz2') or args.memtrace.endswith('.xz')):
traceFile.seek(0, os.SEEK_END)
sampleCount = int(traceFile.tell() / 32)
traceFile.seek(0, os.SEEK_SET)
else:
if not args.quiet:
print('WARNING: cannot show progress for this type of input file', file=sys.stderr)
sampleCount = 0
currentCycles = 0
nextUpdate = 0
lastCycles = -updateInterval
runningTime = 0
acc = {}
saddrs = numpy.array([], dtype=numpy.uint64)
scounts = numpy.array([], dtype=numpy.uint64)
sbytes = numpy.array([], dtype=numpy.uint64)
nextCompressCycle = currentCycles + args.compress_limit
def scompress():
nsaddrs, inv = numpy.unique(saddrs, return_inverse=True)
nscounts = numpy.zeros(len(nsaddrs), dtype=numpy.uint64)
nsbytes = numpy.zeros(len(nsaddrs), dtype=numpy.uint64)
numpy.add.at(nscounts, inv, scounts)
numpy.add.at(nsbytes, inv, sbytes)
return (nsaddrs, nscounts, nsbytes)
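# scompress() merges entries for addresses seen in more than one block: e.g. two partial records
# for the same address with counts (3, 5) and byte totals (96, 160) collapse into a single record
# with count 8 and 256 bytes, keeping the accumulators bounded by the number of distinct addresses.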
while True:
buf = traceFile.read(bufCycles * 32)
if not buf:
break
if not args.quiet and currentCycles >= lastCycles + updateInterval:
currentTime = time.time()
elapsed = currentTime - lastTime
if currentCycles == 0 or elapsed <= 0:
samplesPerSecond = remainingTime = 'n/a'
else:
samplesPerSecond = int((currentCycles - lastCycles) / elapsed)
if sampleCount == 0:
progress = f'{currentCycles} cycles'
remainingTime = 'n/a'
else:
progress = str(int((currentCycles + 1) * 100 / sampleCount)) + '%'
remainingTime = datetime.timedelta(seconds=int((sampleCount - currentCycles) / samplesPerSecond)) if samplesPerSecond != 0 else 'n/a'
print(f"\rPost processing... {progress} (ETA: {remainingTime}, {samplesPerSecond} samples/s) ", end="", file=sys.stderr)
lastTime = currentTime
lastCycles = currentCycles
rawCycles = int(len(buf) / 32)
currentCycles += rawCycles
# Decode the data into a numpy array
decoded = numpy.ndarray((rawCycles, 4), dtype='<Q', buffer=buf)
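    # Each 32-byte record is decoded as four little-endian uint64 words; columns 2 and 3 are used
    # below as the transferred byte count and the sampled address (the meaning of columns 0 and 1
    # is assumed and not used here).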
# If any cycles are left
if decoded.shape[0] > 0:
# Compress this block
naddrs, inv, ncounts = numpy.unique(decoded[:, 3], return_inverse=True, return_counts=True)
nbytes = numpy.zeros(len(naddrs), dtype=numpy.uint64)
numpy.add.at(nbytes, inv, decoded[:, 2])
# Append results
saddrs = numpy.append(saddrs, naddrs)
sbytes = numpy.append(sbytes, nbytes)
scounts = numpy.append(scounts, ncounts)
# Compress data, else we would accumalate too much over time
if currentCycles >= nextCompressCycle:
saddrs, scounts, sbytes = scompress()
nextCompressCycle = currentCycles + args.compress_limit
# for sample in decoded:
# if sample[3] not in acc:
# acc[sample[3]] = [1, sample[2]]
# else:
# acc[sample[3]][0] += 1
# acc[sample[3]][1] += sample[2]
# for i, addr in enumerate(saddrs):
# assert(addr in acc)
# assert(acc[addr][0] == scounts[i])
# assert(acc[addr][1] == sbytes[i])
# if currentCycles >= 100:
# break
# Make sure a last time to compress
saddrs, scounts, sbytes = scompress()
if args.stdout:
csvFile = sys.stdout
args.output = False
if args.output:
csvFile = xopen(args.output, 'w')
csvFile.write('time;power0;pc0;bytes;count\n')
csvFile.write('0;0;0;0;0\n')
numpy.savetxt(csvFile,
numpy.concatenate(
(
numpy.cumsum(scounts, dtype=numpy.uint64).reshape(-1, 1),
numpy.array(sbytes / scounts, dtype=numpy.float64).reshape(-1, 1),
numpy.array(saddrs, dtype=numpy.uint64).reshape(-1, 1),
# - * - encoding : utf - 8 - * -
# pylint: disable=fixme, line-too-long
"""
Matrix factorization solver.
:copyright: 2017-2019 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import numpy as np
import scipy
import scipy.sparse
def _get_sparse_matrixes(X):
'''Create csc, csr and coo sparse matrix from any of the above
Arguments:
X {array-like, csc, csr or coo sparse matrix}
Returns:
csc, csr, coo
'''
X_coo = X_csc = X_csr = None
if scipy.sparse.isspmatrix_coo(X):
X_coo = X
X_csr = X_coo.tocsr(True)
X_csc = X_coo.tocsc(True)
elif scipy.sparse.isspmatrix_csr(X):
X_csr = X
X_csc = X_csr.tocoo(True)
X_coo = X_csr.tocsc(True)
elif scipy.sparse.isspmatrix_csc(X):
X_csc = X
X_csr = X_csc.tocsr(True)
X_coo = X_csc.tocoo(True)
else:
assert False, "only coo, csc and csr sparse matrixes are supported"
return X_csc, X_csr, X_coo
class FactorizationH2O(object):
'''Matrix Factorization on GPU with Alternating Least Square (ALS) algorithm.
Factors a sparse rating matrix X (m by n, with N_z non-zero elements)
into a m-by-f and a f-by-n matrices.
Parameters
----------
f int
decomposition size
lambda_ float
lambda regularization
max_iter int, default: 100
number of training iterations
double_precision bool, default: False
use double precision, not yet supported
thetaT {array-like} shape (n, f), default: None
initial theta matrix
XT {array-like} shape (m, f), default: None
initial XT matrix
random_state int, default: 1234
Attributes
----------
XT {array-like} shape (m, f)
XT matrix contains user's features
thetaT {array-like} shape (n, f)
transposed theta matrix, item's features
Warnings
--------
    Matrices ``XT`` and ``thetaT`` may contain nan elements. This is because some datasets
    contain users or items with no ratings in the training set, so the solution of the
    corresponding system of linear equations becomes nan. Such elements can easily be
    removed with numpy functions like numpy.nan_to_num, but their presence may be useful
    for troubleshooting purposes.
'''
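    # Illustrative usage sketch (not part of the original docstring; the shapes and the
    # scipy.sparse.random call below are assumptions for demonstration only):
    #   import numpy as np
    #   import scipy.sparse as sp
    #   X = sp.random(1000, 200, density=0.01, format='coo', dtype=np.float32)
    #   model = FactorizationH2O(f=40, lambda_=0.01, max_iter=20)
    #   model.fit(X)
    #   ratings_hat = model.XT @ model.thetaT.T   # (m, n) reconstruction of the rating matrix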
def __init__(self, f, lambda_, max_iter=100, double_precision=False, thetaT=None, XT=None, random_state=1234):
assert not double_precision, 'double precision is not yet supported'
assert f % 10 == 0, 'f has to be a multiple of 10'
self.f = f
self.lambda_ = lambda_
self.double_precision = double_precision
self.dtype = np.float64 if self.double_precision else np.float32
self.thetaT = thetaT
self.XT = XT
self.max_iter = max_iter
self.random_state = random_state
def _load_lib(self):
from ..libs.lib_utils import GPUlib
gpu_lib = GPUlib().get(1)
return gpu_lib
def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
#pylint: disable=unused-argument
'''Learn model from rating matrix X.
Parameters
----------
X {array-like, sparse matrix}, shape (m, n)
Data matrix to be decomposed.
y None
Ignored
X_test {array-like, coo sparse matrix}, shape (m, n)
Data matrix for cross validation.
X_BATCHES int, default: 1
Batches to split XT, increase this parameter in case out of memory error.
THETA_BATCHES int, default: 1
Batches to split theta, increase this parameter in case out of memory error.
early_stopping_rounds int, default: None
Activates early stopping. Cross validation error needs to decrease
at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.
Returns the model from the last iteration (not the best one). If early stopping occurs,
the model will have three additional fields: best_cv_score, best_train_score and best_iteration.
verbose bool, default: False
Prints training and validation score(if applicable) on each iteration.
scores {list}
List of tuples with train, cv score for every iteration.
Returns
-------
self : returns an instance of self.
'''
csc_X, csr_X, coo_X = _get_sparse_matrixes(X)
if early_stopping_rounds is not None:
assert X_test is not None, 'X_test is mandatory with early stopping'
if X_test is not None:
assert scipy.sparse.isspmatrix_coo(
X_test), 'X_test must be a coo sparse scipy matrix'
assert X.shape == X_test.shape
assert X_test.dtype == self.dtype
assert X.dtype == self.dtype
coo_X_test = X_test
lib = self._load_lib()
if self.double_precision:
make_data = lib.make_factorization_data_double
run_step = lib.run_factorization_step_double
factorization_score = lib.factorization_score_double
copy_fecatorization_result = lib.copy_fecatorization_result_double
free_data = lib.free_data_double
else:
make_data = lib.make_factorization_data_float
run_step = lib.run_factorization_step_float
factorization_score = lib.factorization_score_float
copy_fecatorization_result = lib.copy_fecatorization_result_float
free_data = lib.free_data_float
m = coo_X.shape[0]
n = coo_X.shape[1]
nnz = csc_X.nnz
if coo_X_test is None:
nnz_test = 0
else:
nnz_test = coo_X_test.nnz
        rs = np.random.RandomState(self.random_state)
# -*- coding: utf-8 -*-
# Module-level imports so that visual_ can be used on its own; wan_inv.__init__ below also
# (re-)imports these via `global` statements, which remains harmless.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.interpolate import griddata
class visual_():
def __init__(self, file_path):
self.dir= file_path
def show_error(self, iteration, error, name1, name2, dim):
        # Plot the L_2 relative error vs. iteration
plt.figure(figsize=(8,7))
plt.semilogy(iteration, error, color='b')
plt.xlabel("Iteration", size=28)
plt.ylabel(name1, size=28)
plt.tight_layout()
plt.savefig(self.dir+'figure_err/error_iter_%s_%dd.png'%(name2, dim))
plt.close()
def show_error_abs(self, mesh, x_y, z, name, dim):
        # Plot the point-wise absolute error
x= np.ravel(x_y[:,0])
y= np.ravel(x_y[:,1])
#
xi,yi = mesh
zi = griddata((x, y), np.ravel(z), (xi, yi), method='linear')
plt.figure(figsize=(8,7))
plt.contourf(xi, yi, zi, 15, cmap=plt.cm.jet)
plt.colorbar()
plt.xlim(np.min(xi), np.max(xi))
plt.xlabel('x', fontsize=28)
plt.ylim(np.min(yi), np.max(yi))
plt.ylabel('y', fontsize=28)
plt.tight_layout()
plt.savefig(self.dir+'figure_err/error_abs_%s_%dd.png'%(name, dim))
plt.close()
def show_u_val(self, mesh, x_y, z1, z2, name, num):
x= np.ravel(x_y[:,0])
y= np.ravel(x_y[:,1])
#
xi,yi = mesh
#*******************
fig= plt.figure(figsize=(12,5))
ax1= fig.add_subplot(1,2,1)
z1i = griddata((x, y), np.ravel(z1), (xi, yi), method='linear')
graph1= plt.contourf(xi, yi, z1i, 15, cmap=plt.cm.jet)
fig.colorbar(graph1, ax= ax1)
#
ax2= fig.add_subplot(1,2,2)
z2i= griddata((x, y), np.ravel(z2), (xi, yi), method='linear')
graph2= ax2.contourf(xi, yi, z2i, 15, cmap= cm.jet)
fig.colorbar(graph2, ax= ax2)
#*******************
plt.tight_layout()
plt.savefig(self.dir+'figure_%s/iwan_%s_%d.png'%(name, name, num))
plt.close()
def show_v_val(self, mesh, x_y, z, name, num):
x= np.ravel(x_y[:,0])
y= np.ravel(x_y[:,1])
#
xi,yi = mesh
zi = griddata((x, y), np.ravel(z), (xi, yi), method='linear')
plt.figure(figsize=(8,7))
plt.contourf(xi, yi, zi, 15, cmap=plt.cm.jet)
plt.colorbar()
plt.xlim(np.min(xi), np.max(xi))
plt.xlabel('x', fontsize=28)
plt.ylim(np.min(yi), np.max(yi))
plt.ylabel('y', fontsize=28)
plt.tight_layout()
plt.savefig(self.dir+'figure_%s/iwan_%s_%d.png'%(name, name, num))
plt.close()
class wan_inv():
def __init__(self, dim, noise_level, dm_size, bd_size, beta_u, beta_bd,
u_step, u_rate, v_step, v_rate, file_path, iteration):
import numpy as np
global np
#
import time
global time
#
import tensorflow as tf
global tf
#
import matplotlib.pyplot as plt
global plt
#
from scipy.interpolate import griddata
global griddata
#
from scipy.stats import truncnorm
global truncnorm
#
from matplotlib import cm
global cm
#
        self.dim= dim                        # dimension of the problem
        self.noise_level= noise_level
        self.up, self.low= 1.0, -1.0         # rectangular domain [-1,1]^d
        self.k= [0.81, 2.0]+[0.09]*(dim-2)   # coefficients in the expression for region \omega_1
        self.c_a= [0.1, 0.3]+[0.0]*(dim-2)   # center of region \omega_1
        self.c_u= [0, 0]+[0.0]*(dim-2)       # location of the minimum of the exact solution
        self.r= 0.6                          # radius in the expression for region \omega_1
        self.alpha= 0.02                     # controls the sharpness of the discontinuity (smaller = more singular)
        self.a1= 2.0                         # value of the coefficient a(x) inside region \omega_1
        self.a2= 0.5                         # value of the coefficient a(x) outside region \omega_1
        self.mesh_size= 100                  # used to generate the testing data
        self.beta_u= beta_u                  # weight of the boundary loss term for u(x)
        self.beta_bd= beta_bd
        #
        self.v_layer= 6                      # number of hidden layers of the test function v
        self.v_h_size= 20                    # number of neurons per layer of the test function v
        #
        self.a_layer= 4
        self.a_h_size= 20
        self.u_layer= 6
        self.u_h_size= 20
        #
        self.u_step= u_step                  # inner loop for u(x): iteration steps of network u
        self.u_rate= u_rate                  # inner loop for u(x): learning rate of network u
        self.v_step_u= v_step                # inner loop for u(x): iteration steps of the test function v
        self.v_rate_u= v_rate                # inner loop for u(x): learning rate of the test function v
        #
        self.dm_size= dm_size                # number of sample points in the domain interior
        self.bd_size= bd_size                # number of sample points on the boundary
        self.iteration= iteration
        #
        self.dir= file_path                  # output folder; create it before running, and inside it create
        # subfolders figure_err, figure_u, figure_a, figure_v for the figures produced during training
def get_truncated_normal(self, mean=0.0, sd=1.0):
        # Truncated-normal generator used to create observation noise
low= -100; up= 100
result= truncnorm((low-mean)/sd, (up-mean)/sd, loc=mean, scale=sd)
return(result)
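        # Hypothetical usage sketch (the actual noise injection is outside this excerpt):
        #   distb = self.get_truncated_normal(sd=self.noise_level)
        #   u_bd_noisy = u_bd + distb.rvs(u_bd.shape)
        # i.e. draw one truncated-normal perturbation per boundary observation.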
def sample_train(self, dm_size, bd_size, dim):
        # Generate the training data
low, up= self.low, self.up
distb= self.get_truncated_normal()
#********************************************************
# collocation points in domain
x_dm= np.random.uniform(low, up, [dm_size, dim])
#*********************************************************
# The value of f(x)
omega_a, omega_u= 0.0, 0.0
for i in range(dim):
omega_a= omega_a+self.k[i]**2*(x_dm[:,i]-self.c_a[i])**2
omega_u= omega_u+self.k[i]**2*(x_dm[:,i]-self.c_a[i])*(x_dm[:,i]-self.c_u[i])
exp_term= np.exp((omega_a-self.r**2)/self.alpha)
#
part_one= 4*(self.a1-self.a2)*omega_u/(self.alpha/exp_term+2*self.alpha+self.alpha*exp_term)
part_two= 2*dim*(self.a2*(1-1/(1+exp_term))+self.a1/(1+exp_term))
f_dm= part_one-part_two
f_dm= np.reshape(f_dm, [-1,1])
#*********************************************************
# collocation points on boundary
x_bd_list=[]
n_vector_list=[]
for i in range(dim):
x_bound= np.random.uniform(low, up, [bd_size, dim])
x_bound[:,i]= up
x_bd_list.append(x_bound)
n_vector= np.zeros_like(x_bound)
n_vector[:,i]=1
n_vector_list.append(n_vector)
x_bound= np.random.uniform(low, up, [bd_size, dim])
x_bound[:,i]= low
x_bd_list.append(x_bound)
n_vector= np.zeros_like(x_bound)
n_vector[:,i]=-1
n_vector_list.append(n_vector)
x_bd= np.concatenate(x_bd_list, axis=0)
n_vector= np.concatenate(n_vector_list, 0)
#***********************************************************
# observation of u(x) on boundary
u_bd= 0.0
for i in range(dim):
u_bd= u_bd+(x_bd[:,i]-self.c_u[i])**2
u_bd= np.reshape(u_bd, [-1, 1])
#*********************************************************
# observation of a(x) on boundary
omega_a_bd= 0.0
for i in range(dim):
omega_a_bd= omega_a_bd+self.k[i]**2*(x_bd[:,i]-self.c_a[i])**2
exp_term_bd= np.exp((omega_a_bd-self.r**2)/self.alpha)
#
a_bd= (self.a2*(1-1/(1+exp_term_bd))+self.a1/(1+exp_term_bd))
a_bd= np.reshape(a_bd, [-1,1])
#********************************************************
train_dict={}
x_dm= np.float32(x_dm); train_dict['x_dm']= x_dm
f_dm= np.float32(f_dm); train_dict['f_dm']= f_dm
x_bd= np.float32(x_bd); train_dict['x_bd']= x_bd
u_bd= np.float32(u_bd); train_dict['u_bd']= u_bd
a_bd= np.float32(a_bd); train_dict['a_bd']= a_bd
n_vector= np.float32(n_vector); train_dict['n_vector']=n_vector
return(train_dict)
def sample_test(self, mesh_size, dim):
        # Generate the test data
low, up= self.low, self.up
#**********************************************************
# generate meshgrid in the domain
x_mesh= np.linspace(low, up, mesh_size)
mesh= np.meshgrid(x_mesh, x_mesh)
x1_dm= np.reshape(mesh[0], [-1,1])
x2_dm= np.reshape(mesh[1], [-1,1])
#
x3_dm= np.random.uniform(low, up, [self.mesh_size*self.mesh_size, dim-2])
x_dm= np.concatenate([x1_dm, x2_dm, x3_dm], axis=1)
x4_dm= np.zeros([self.mesh_size*self.mesh_size, dim-2])
x_draw_dm= np.concatenate([x1_dm, x2_dm, x4_dm], axis=1)
#***********************************************************
# The exact u(x)
u_dm= 0.0
u_draw_dm= 0.0
for i in range(dim):
u_dm= u_dm+(x_dm[:,i]-self.c_u[i])**2
u_draw_dm= u_draw_dm+(x_draw_dm[:,i]-self.c_u[i])**2
u_dm= np.reshape(u_dm, [-1, 1])
u_draw_dm= np.reshape(u_draw_dm, [-1, 1])
#***********************************************************
# The exact a(x)
omega_a= 0.0
omega_draw_a= 0.0
for i in range(dim):
omega_a= omega_a+self.k[i]**2*(x_dm[:,i]-self.c_a[i])**2
omega_draw_a= omega_draw_a+self.k[i]**2*(x_draw_dm[:,i]-self.c_a[i])**2
exp_term= np.exp((omega_a-self.r**2)/self.alpha)
exp_draw_term= np.exp((omega_draw_a-self.r**2)/self.alpha)
#
a_dm= (self.a2*(1-1/(1+exp_term))+self.a1/(1+exp_term))
a_dm= np.reshape(a_dm, [-1,1])
a_draw_dm= (self.a2*(1-1/(1+exp_draw_term))+self.a1/(1+exp_draw_term))
a_draw_dm= np.reshape(a_draw_dm, [-1,1])
#***********************************************************
test_dict={}
test_dict['mesh']= mesh
x_dm= np.float32(x_dm); test_dict['test_x']= x_dm
        u_dm= np.float32(u_dm)
"""
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: <NAME>, <NAME>
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
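# Note: `scores_` returned above is, roughly, the selection frequency of each feature --
# the fraction of the `n_resampling` randomized fits in which that feature ended up with a
# non-zero coefficient. Values close to 1 indicate features selected under almost every
# perturbation of the data and penalty; this is the quantity later compared against
# `selection_threshold` in `get_support`.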
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
                      precompute=False, eps=np.finfo(float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
    alpha = np.atleast_1d(np.asarray(alpha, dtype=float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
                                      copy_Gram=False, alpha_min=np.min(alpha)
# Created by <NAME> at 2021/5/27
import datetime
import logging
from glob import glob
import numpy as np
from pathlib import Path
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
try:
from sktime.forecasting.model_selection import temporal_train_test_split
except:
from pmdarima.model_selection import train_test_split as temporal_train_test_split
from data_helper import get_feature_dict, prepare_model_data, prepare_model_data_basline, gen_data_matrix, load_rb
from model import auto_tuned_model, baseline_model, grid_search_model
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%m/%d/ %H:%M:%S %p")
def evalute_model(y,
auto_tune=False,
interval_len=180,
gap=10,
loss_func=None):
if loss_func is None:
loss_func = "RMSE"
timeline = y.index
feature_files = glob(str(Path('data', '*.csv')))
feature_files.remove(str(Path('data', 'rb_continuous_contracts.csv')))
y_name = y.columns[0]
record = []
fitted_model = []
predictions = []
for i in range(0, len(timeline), interval_len + gap):
cur_interal = timeline[i: i + interval_len + gap][:-gap]
if len(cur_interal) < interval_len:
logging.info("The sample data has been exhausted")
continue
target = y.loc[cur_interal]
start = cur_interal[0]
start_ = cur_interal[0] - datetime.timedelta(days=10)
end = cur_interal[-1]
logging.info("Current interval {0} : {1}".format(start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d")))
feature_dict, cat_features, numerical_features = get_feature_dict(feature_files, start_, end)
df = gen_data_matrix(target, feature_dict)
df = df.dropna(how='all', axis=1)
df = df.loc[start: end]
df_train, df_test = temporal_train_test_split(df, test_size=0.2)
df_train_tree, df_val_tree = temporal_train_test_split(df, test_size=0.2)
X_test_tree, y_test_tree = prepare_model_data(df_test, y_name)
X_train_tree, y_train_tree = prepare_model_data(df_train_tree, y_name)
X_val_tree, y_val_tree = prepare_model_data(df_val_tree, y_name)
if auto_tune:
tree_model = auto_tuned_model(X_train_tree,
y_train_tree,
X_val_tree,
y_val_tree,
loss_func
)
else:
tree_model = grid_search_model(X_train_tree,
y_train_tree,
X_val_tree,
y_val_tree,
loss_func=loss_func
)
y_hat_tree = tree_model.predict(X_test_tree)
if loss_func == 'RMSE':
rmse_tree = mean_squared_error(y_test_tree, y_hat_tree)
if loss_func == 'MAE':
rmse_tree = mean_absolute_error(y_test_tree, y_hat_tree)
r2_tree = r2_score(y_test_tree, tree_model.predict(X_test_tree))
X_b, y_b = prepare_model_data_basline(df, y_name)
y_train_b, y_test_b, X_train_b, X_test_b = temporal_train_test_split(y_b, X_b, test_size=0.2)
baseline = baseline_model(X_train_b, y_train_b)
y_hat_baseline = baseline.predict(X_test_b)
if loss_func == 'RMSE':
rmse_baseline = mean_squared_error(y_test_b, y_hat_baseline)
rmse_naive = mean_squared_error(df_test[y_name], [df_train[y_name].mean()] * len(df_test))
if loss_func == 'MAE':
rmse_baseline = mean_absolute_error(y_test_b, y_hat_baseline)
rmse_naive = mean_absolute_error(df_test[y_name], [df_train[y_name].mean()] * len(df_test))
r2_baseline = r2_score(y_test_b, y_hat_baseline)
r2_naive = r2_score(df_test[y_name], [df_train[y_name].mean()] * len(df_test))
y_hat = {
"tree_model": y_hat_tree,
"baseline": y_hat_baseline,
}
predictions.append(y_hat)
metrics = {loss_func + '_tree': rmse_tree,
loss_func + '_baseline': rmse_baseline,
loss_func + '_naive': rmse_naive,
'r2_tree': r2_tree,
'r2_baseline': r2_baseline,
'r2_naive': r2_naive,
'train_start': df_train.index[0],
'train_end': df_train.index[-1],
'test_start': df_test.index[0],
'test_end': df_test.index[-1],
}
log_string = "\n\t" + loss_func + "_tree: {0}, \n\t" + loss_func + "_baseline: {1},\n\t" + loss_func + \
"_naive: {2},\n\t" + "r2_tree: {3},\n\t" + "r2_baseline: {4}, \n\t" + "r2_naive: {5}"
logging.info(log_string.format(rmse_tree,rmse_baseline,rmse_naive,r2_tree,r2_baseline,r2_naive))
model = {"tree": tree_model, "baseline": baseline}
record.append(metrics)
fitted_model.append(model)
return record, fitted_model, predictions
def backtest(prediction_df, record_df, price, f):
"""
    :param prediction_df: DataFrame of per-interval predictions (columns 'tree_model' and 'baseline')
    :param record_df: DataFrame of per-interval records containing 'test_start' and 'test_end'
    :param price: price series indexed by date, covering the test intervals
    :param f: forecast horizon in days used to compute the per-trade PnL
    :return: (gain/loss ratio, winning rate, Sharpe ratio)
"""
pnls = []
rets = []
for i in range(len(prediction_df)):
prediction = prediction_df.iloc[i]
        s, e = record_df[['test_start', 'test_end']].iloc[i]
price_ = price.loc[s:e]
pnls.append(_backtest(prediction, price_, f))
rets.append(_backtest2(prediction, price_, f))
pnls = np.hstack(pnls)
rets = np.hstack(rets)
glr, win_rate = cal_glr_wr(pnls)
sr = cal_sharpe_ratio(rets)
print("Gain-Loss Ratio: {:.2f} ".format(glr))
print("Winning Rate: {:.2f}% ".format(win_rate))
print("Sharpe Ratio: {:.2f} ".format(sr))
return glr, win_rate, sr
def _backtest2(prediction, price, acct_num,):
"""
Cal daiy return in % form
:param prediction:
:param price:
:param acct_num:
:return:
"""
# starting net val for trading account
mat = np.ones((acct_num, len(price)))
# liquidate or build position time
_idx = np.arange(len(price))
# price change
_chg = price.pct_change()
for i in range(acct_num):
adjust_time = _idx[i::acct_num]
for j, k in zip(adjust_time, np.hstack((adjust_time[1:], [-1]))):
sign = np.sign(prediction[j])
if k != -1:
mat[i][j+1:k+1] = 1+sign * _chg[j+1: k+1]
else:
mat[i][j+1:] = 1+ sign * _chg[j+1: ]
mat = mat.cumprod(1).sum(0)
mat /= mat[0]
# daily return in % form.
return 100 * np.diff(mat)/mat[:-1]
def _backtest(prediction, price, f):
'''
PnL for each trade
:param prediction:
:param start:
:param end:
:param price:
:param f:
:return:
'''
pos = np.where(prediction > 0, 1, -1)[:-f]
chg = price.diff(f).dropna()
pnl = chg * pos
return pnl
def cal_glr_wr(pnl):
glr = -pnl[pnl > 0].mean() / pnl[pnl < 0].mean()
win_rate = len(pnl[pnl > 0]) / len(pnl)
return glr, 100 * win_rate
def cal_sharpe_ratio(r):
return np.sqrt(252) * r.mean() / r.std()
def evaluate_model2(y,
price,
auto_tune=False,
interval_len=180,
test_size=0.2,
loss_func=None):
if loss_func is None:
loss_func = "RMSE"
timeline = y.index
feature_files = glob(str(Path('data', '*.csv')))
feature_files.remove(str(Path('data', 'rb_continuous_contracts.csv')))
y_name = y.columns[0]
logging.info("The target is {0}".format(y_name))
record = []
fitted_model = []
predictions = []
test_price = []
start = 0
end = start + interval_len
while (end < len(timeline)):
current_interval = timeline[start: end]
logging.info("Current interval {0} : {1}".format(current_interval[0].strftime("%Y-%m-%d"),
current_interval[-1].strftime("%Y-%m-%d")))
target = y.loc[current_interval]
s, e = current_interval[0], current_interval[-1]
feature_dict, cat_features, numerical_features = get_feature_dict(feature_files, s, e)
df = gen_data_matrix(target, feature_dict)
df = df.dropna(how='all', axis=1)
df = df.loc[s: e]
df_train, df_test = temporal_train_test_split(df, test_size=test_size)
df_train_tree, df_val_tree = temporal_train_test_split(df, test_size=0.2)
X_test_tree, y_test_tree = prepare_model_data(df_test, y_name)
X_train_tree, y_train_tree = prepare_model_data(df_train_tree, y_name)
X_val_tree, y_val_tree = prepare_model_data(df_val_tree, y_name)
test_price.append(price.loc[df_test.index].values)
if auto_tune:
tree_model = auto_tuned_model(X_train_tree,
y_train_tree,
X_val_tree,
y_val_tree,
loss_func=loss_func
)
else:
tree_model = grid_search_model(X_train_tree,
y_train_tree,
X_val_tree,
y_val_tree,
loss_func=loss_func
)
y_hat_tree = tree_model.predict(X_test_tree)
if loss_func == 'RMSE':
rmse_tree_test = mean_squared_error(y_test_tree, y_hat_tree)
rmse_tree_train = mean_squared_error(y_train_tree, tree_model.predict(X_train_tree))
if loss_func == 'MAE':
rmse_tree_test = mean_absolute_error(y_test_tree, y_hat_tree)
rmse_tree_train = mean_absolute_error(y_train_tree, tree_model.predict(X_train_tree))
r2_tree_test = r2_score(y_test_tree, tree_model.predict(X_test_tree))
r2_tree_train = r2_score(y_train_tree, tree_model.predict(X_train_tree))
X_b, y_b = prepare_model_data_basline(df, y_name)
y_train_b, y_test_b, X_train_b, X_test_b = temporal_train_test_split(y_b, X_b, test_size=test_size)
baseline = baseline_model(X_train_b, y_train_b)
y_hat_baseline = baseline.predict(X_test_b)
if loss_func == 'RMSE':
rmse_baseline_test = mean_squared_error(y_test_b, y_hat_baseline)
rmse_baseline_train = mean_squared_error(y_train_b, baseline.predict(X_train_b))
rmse_naive_test = mean_squared_error(df_test[y_name], [df_train[y_name].mean()] * len(df_test))
rmse_naive_train = mean_squared_error(df_train[y_name], [df_train[y_name].mean()] * len(df_train))
if loss_func == 'MAE':
rmse_baseline_test = mean_absolute_error(y_test_b, y_hat_baseline)
rmse_baseline_train = mean_absolute_error(y_train_b, baseline.predict(X_train_b))
rmse_naive_test = mean_absolute_error(df_test[y_name], [df_train[y_name].mean()] * len(df_test))
rmse_naive_train = mean_absolute_error(df_train[y_name], [df_train[y_name].mean()] * len(df_train))
r2_baseline_test, r2_baseline_train = r2_score(y_test_b, y_hat_baseline), r2_score(y_train_b,
baseline.predict(X_train_b))
r2_naive_test = r2_score(df_test[y_name], [df_train[y_name].mean()] * len(df_test))
r2_naive_train = r2_score(df_train[y_name], [df_train[y_name].mean()] * len(df_train))
y_hat = {
"tree_model": y_hat_tree,
"baseline": y_hat_baseline,
}
predictions.append(y_hat)
metrics = {loss_func + '_tree' + '_test': rmse_tree_test,
loss_func + '_tree' + '_train': rmse_tree_train,
loss_func + '_baseline' + '_test': rmse_baseline_test,
loss_func + '_baseline' + '_train': rmse_baseline_train,
loss_func + '_naive' + '_test': rmse_naive_test,
loss_func + '_naive' + '_train': rmse_naive_train,
'r2_tree_test': r2_tree_test,
'r2_tree_train': r2_tree_train,
'r2_baseline_test': r2_baseline_test,
'r2_baseline_train': r2_baseline_train,
'r2_naive_test': r2_naive_test,
'r2_naive_train': r2_naive_train,
'train_start': df_train.index[0],
'train_end': df_train.index[-1],
'test_start': df_test.index[0],
'test_end': df_test.index[-1],
}
log_string = []
for k, v in metrics.items():
if k not in ['train_start', 'train_end', 'test_start', 'test_end']:
sub_str = '='.join((k, str(v)))
log_string.append(sub_str)
log_string = '\n\t'.join(log_string)
log_string = '\n\t' + log_string
logging.info(log_string)
model = {"tree": tree_model, "baseline": baseline}
record.append(metrics)
fitted_model.append(model)
end = end + len(df_test)
start = end - interval_len
else:
logging.info("The sample data has been exhausted")
try:
# Backtest Statistics
test_price = np.array(test_price).flatten()
predictions_df = pd.DataFrame(predictions)
predictions_tree = np.concatenate(predictions_df["tree_model"].tolist())
predictions_baseline = np.concatenate(predictions_df["baseline"].tolist())
try:
assert len(test_price) == len(predictions_tree)
assert len(test_price) == len(predictions_baseline)
except AssertionError:
logging.critical("Check the Prediction Array")
logging.info("Lenght of test price {0}".format(str(len(price))))
logging.info("Lenght of predictions {0}".format(str(len(predictions_tree))))
acct_num = int(y_name.split('_')[-1])
tree_result = backtest3(predictions_tree, test_price, acct_num)
baseline_result = backtest3(predictions_baseline, test_price, acct_num)
backtest_result = {'tree': tree_result, 'baseline': baseline_result}
except:
backtest_result = None
return record, fitted_model, predictions, backtest_result
def backtest3(prediction, price, acct_num):
"""
    :param prediction: array of model predictions; only the sign is used as the trade direction
    :param price: array of prices aligned with the predictions
    :param acct_num: holding period in days (equivalently, the number of staggered accounts)
    :return: dict of backtest statistics (gain/loss ratio, winning rate, Sharpe ratio,
             annualized return, max drawdown, daily returns)
"""
position = np.where(prediction > 0, 1, -1)[:-acct_num]
chg = price[acct_num:] - price[:-acct_num]
pnl = chg * position
glr = -pnl[pnl > 0].mean() / pnl[pnl < 0].mean()
win_rate = 100 * len(pnl[pnl > 0]) / len(pnl)
daily_ret = cal_daily_return(prediction, price, acct_num)
sharpe_ratio = np.sqrt(252) * daily_ret.mean() / daily_ret.std()
annualized_return = ((1 + daily_ret.mean() / 100) ** 252 - 1) * 100
max_down = max_draw_down(daily_ret)
items = {'gain_loss_ratio': glr,
'winning_rate': win_rate,
'sharpe_ratio': sharpe_ratio,
'annualized_return': annualized_return,
'max_drawdown': max_down,
'daily_ret': daily_ret}
return items
def cal_annualized_return(mean_ret):
return ((1 + mean_ret) ** 252 - 1) * 100
def max_draw_down(ret: np.array):
nv = (1+ret/100).cumprod()
nv = np.insert(nv, 0, 1)
return -100 * (1 - (nv/np.maximum.accumulate(nv))).max()
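# Worked example of the drawdown formula above (illustrative numbers):
#   ret = np.array([10., -20., 5.])            # daily returns in %
#   net value path  -> [1.0, 1.10, 0.88, 0.924] (after the np.insert of the starting 1.0)
#   running maximum -> [1.0, 1.10, 1.10, 1.10]
#   1 - nv/max      -> [0.0, 0.0, 0.2, 0.16]   => max_draw_down(ret) == -20.0 (%)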
def cal_daily_return(prediction, price, acct_num):
# starting net val for trading account
mat = np.ones((acct_num, len(price)))
# liquidate or build position time
_idx = np.arange(len(price))
# price change
_chg = pd.Series(price).pct_change()
for i in range(acct_num):
adjust_time = _idx[i::acct_num]
for j, k in zip(adjust_time, np.hstack((adjust_time[1:], [-1]))):
sign = np.sign(prediction[j])
if k != -1:
mat[i][j + 1:k + 1] = 1 + sign * _chg[j + 1: k + 1]
else:
mat[i][j + 1:] = 1 + sign * _chg[j + 1:]
mat = mat.cumprod(1).sum(0)
mat /= mat[0]
# daily return in % form.
    return 100 * np.diff(mat)/mat[:-1]
from __future__ import print_function, unicode_literals, absolute_import, division
import os
import sys
import re
import numpy as np
import numexpr as ne
from .base import BaseValidationTest, TestResult
from .plotting import plt
from astropy.table import Table
from scipy.spatial import distance_matrix
import ot
from numba import jit
import matplotlib as mpl
__all__ = ['CheckColors']
# Transformations of DES -> SDSS and DES -> CFHT are derived from Equations A9-12 and
# A19-22 the paper: arxiv.org/abs/1708.01531
# Transformations of SDSS -> CFHT are from:
# http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
color_transformation = {'des2sdss': {}, 'des2cfht': {}, 'sdss2cfht': {}, 'lsst2cfht': {}, 'lsst2sdss':{}, 'sdss2lsst':{}, 'cfht2sdss':{}, 'cfht2lsst':{}}
color_transformation['des2sdss']['g'] = '1.10421 * g - 0.104208 * r'
color_transformation['des2sdss']['r'] = '0.102204 * g + 0.897796 * r'
color_transformation['des2sdss']['i'] = '1.30843 * i - 0.308434 * z'
color_transformation['des2sdss']['z'] = '0.103614 * i + 0.896386 * z'
color_transformation['des2cfht']['g'] = '0.945614 * g + 0.054386 * r'
color_transformation['des2cfht']['r'] = '0.0684211 * g + 0.931579 * r'
color_transformation['des2cfht']['i'] = '1.18646 * i - 0.186458 * z'
color_transformation['des2cfht']['z'] = '0.144792 * i + 0.855208 * z'
color_transformation['sdss2cfht']['u'] = 'u - 0.241 * (u - g)'
color_transformation['sdss2cfht']['g'] = 'g - 0.153 * (g - r)'
color_transformation['sdss2cfht']['r'] = 'r - 0.024 * (g - r)'
color_transformation['sdss2cfht']['i'] = 'i - 0.085 * (r - i)'
color_transformation['sdss2cfht']['z'] = 'z + 0.074 * (i - z)'
color_transformation['cfht2sdss']['u'] = 'u + 0.342 * (u - g)'
color_transformation['cfht2sdss']['g'] = 'g + 0.014 + 0.133 * (g - r) + 0.031 * (g - r) * (g - r)'
color_transformation['cfht2sdss']['r'] = 'r + 0.05 * (r - i)'
color_transformation['cfht2sdss']['i'] = 'i + 0.087 * (r - i)'
color_transformation['cfht2sdss']['z'] = 'z - 0.057 * (i - z)'
#these were derived from cosmoDC2 GCRCatalogs version = 0.14.4
color_transformation['lsst2sdss']['u'] = '0.203 * (u - g) + u + 0.04'
color_transformation['lsst2sdss']['g'] = '0.119 * (g - r) + g + 0.001'
color_transformation['lsst2sdss']['r'] = '0.025 * (r - i) + r + 0.001'
color_transformation['lsst2sdss']['i'] = '0.013 * (i - z) + i + 0.001'
color_transformation['lsst2sdss']['z'] = '-0.031 * (z - y) + z + 0.001'
color_transformation['sdss2lsst']['u'] = '0.932 * u + 1.865'
color_transformation['sdss2lsst']['g'] = '-0.11 * (g - r) + g + 0.001'
color_transformation['sdss2lsst']['r'] = '-0.026 * (r - i) + r - 0.001'
color_transformation['sdss2lsst']['i'] = '-0.01 * (i - z) + i'
color_transformation['sdss2lsst']['z'] = '1.001 * z + 0.043'
#for these I combined the transformations above, CFHT actually should be MegaCam
color_transformation['cfht2lsst']['u'] = '1.251 * u - 0.319 * g + 1.865'
color_transformation['cfht2lsst']['g'] = 'g + 0.00837 * (g - r) + 0.028 * (g - r) * (g - r) + 0.0055 * (r - i) + 0.013'
color_transformation['cfht2lsst']['r'] = 'r - 0.02 * (r - i) - 0.001'
color_transformation['cfht2lsst']['i'] = 'i + 0.086 * (r - i) - 0.00943 * (i - z)'
color_transformation['cfht2lsst']['z'] = '1.058 * z - 0.057 * i + 0.043'
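# These strings are numexpr expressions over the per-band magnitudes. A minimal sketch of
# how one entry can be evaluated (the arrays g and r are stand-ins for the catalog
# magnitudes actually used later in the test):
#   g = np.array([20.1, 21.3])
#   r = np.array([19.8, 20.9])
#   g_sdss = ne.evaluate(color_transformation['des2sdss']['g'])   # 1.10421*g - 0.104208*r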
class kernelCompare:
def __init__(self,D1, D2):
self._D1 = D1
self._D2 = D2
self._XY = np.vstack((D1, D2))
self._scale = self._computeScale(self._XY)
self._n1 = len(D1)
self._n2 = len(D2)
def _computeScale(self,XY):
        '''Compute the kernel bandwidth parameter as the per-dimension
        median absolute deviation about the mean
'''
Z = XY - np.mean(XY,0)
Z = np.abs(Z)
        scaleXY = np.median(Z, 0)
import logging
import math
import hashlib
from functools import partial
from inspect import getmembers
from itertools import repeat
import numpy as np
import datajoint as dj
import scipy.stats as sc_stats
from . import lab
from . import experiment
from . import ephys
[lab, experiment, ephys] # NOQA
from . import get_schema_name
schema = dj.schema(get_schema_name('psth'))
log = logging.getLogger(__name__)
# NOW:
# - rework Condition to TrialCondition funtion+arguments based schema
def key_hash(key):
"""
Given a dictionary `key`, returns an md5 hash string of its values.
For use in building dictionary-keyed tables.
"""
hashed = hashlib.md5()
for k, v in sorted(key.items()):
hashed.update(str(v).encode())
return hashed.hexdigest()
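# Example: the hash depends only on the sorted (key, value) pairs, so it is stable across
# dictionaries holding the same content:
#   key_hash({'b': 2, 'a': 1}) == key_hash({'a': 1, 'b': 2})   # -> True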
@schema
class TrialCondition(dj.Lookup):
'''
TrialCondition: Manually curated condition queries.
Used to define sets of trials which can then be keyed on for downstream
computations.
'''
definition = """
trial_condition_name: varchar(128) # user-friendly name of condition
---
trial_condition_hash: varchar(32) # trial condition hash - hash of func and arg
unique index (trial_condition_hash)
trial_condition_func: varchar(36) # trial retrieval function
trial_condition_arg: longblob # trial retrieval arguments
"""
@property
def contents(self):
contents_data = [
{
'trial_condition_name': 'good_noearlylick_hit',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'task': 'audio delay',
'task_protocol': 1,
'outcome': 'hit',
'early_lick': 'no early'}
},
{
'trial_condition_name': 'good_noearlylick_left_hit',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'task': 'audio delay',
'task_protocol': 1,
'outcome': 'hit',
'early_lick': 'no early',
'trial_instruction': 'left'}
},
{
'trial_condition_name': 'good_noearlylick_right_hit',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'task': 'audio delay',
'task_protocol': 1,
'outcome': 'hit',
'early_lick': 'no early',
'trial_instruction': 'right'}
},
{
'trial_condition_name': 'good_noearlylick_left_miss',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'task': 'audio delay',
'task_protocol': 1,
'outcome': 'miss',
'early_lick': 'no early',
'trial_instruction': 'left'}
},
{
'trial_condition_name': 'good_noearlylick_right_miss',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'task': 'audio delay',
'task_protocol': 1,
'outcome': 'miss',
'early_lick': 'no early',
'trial_instruction': 'right'}
},
{
'trial_condition_name': 'all_noearlylick_nostim',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'_outcome': 'ignore',
'task': 'audio delay',
'task_protocol': 1,
'early_lick': 'no early'}
},
{
'trial_condition_name': 'all_noearlylick_nostim_left',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'_outcome': 'ignore',
'task': 'audio delay',
'task_protocol': 1,
'early_lick': 'no early',
'trial_instruction': 'left'}
},
{
'trial_condition_name': 'all_noearlylick_nostim_right',
'trial_condition_func': '_get_trials_exclude_stim',
'trial_condition_arg': {
'_outcome': 'ignore',
'task': 'audio delay',
'task_protocol': 1,
'early_lick': 'no early',
'trial_instruction': 'right'}
}
]
# PHOTOSTIM conditions
stim_locs = ['left_alm', 'right_alm', 'both_alm']
for loc in stim_locs:
for instruction in (None, 'left', 'right'):
condition = {'trial_condition_name': '_'.join(filter(None, ['all', 'noearlylick', loc, 'stim',
instruction])),
'trial_condition_func': '_get_trials_include_stim',
'trial_condition_arg': {
**{'_outcome': 'ignore',
'task': 'audio delay',
'task_protocol': 1,
'early_lick': 'no early',
'brain_location_name': loc},
**({'trial_instruction': instruction} if instruction else {})}
}
contents_data.append(condition)
return ({**d, 'trial_condition_hash':
key_hash({'trial_condition_func': d['trial_condition_func'],
**d['trial_condition_arg']})}
for d in contents_data)
@classmethod
def get_trials(cls, trial_condition_name):
return cls.get_func({'trial_condition_name': trial_condition_name})()
@classmethod
def get_cond_name_from_keywords(cls, keywords):
matched_cond_names = []
for cond_name in cls.fetch('trial_condition_name'):
match = True
tmp_cond = cond_name
for k in keywords:
if k in tmp_cond:
tmp_cond = tmp_cond.replace(k, '')
else:
match = False
break
if match:
matched_cond_names.append(cond_name)
return sorted(matched_cond_names)
@classmethod
def get_func(cls, key):
self = cls()
func, args = (self & key).fetch1(
'trial_condition_func', 'trial_condition_arg')
return partial(dict(getmembers(cls))[func], **args)
@classmethod
def _get_trials_exclude_stim(cls, **kwargs):
# Note: inclusion (attr) is AND - exclusion (_attr) is OR
log.debug('_get_trials_exclude_stim: {}'.format(kwargs))
restr, _restr = {}, {}
for k, v in kwargs.items():
if k.startswith('_'):
_restr[k[1:]] = v
else:
restr[k] = v
stim_attrs = set(experiment.Photostim.heading.names) - set(experiment.Session.heading.names)
behav_attrs = set(experiment.BehaviorTrial.heading.names)
_stim_key = {k: v for k, v in _restr.items() if k in stim_attrs}
_behav_key = {k: v for k, v in _restr.items() if k in behav_attrs}
stim_key = {k: v for k, v in restr.items() if k in stim_attrs}
behav_key = {k: v for k, v in restr.items() if k in behav_attrs}
return (((experiment.BehaviorTrial & behav_key) - [{k: v} for k, v in _behav_key.items()]) -
(experiment.PhotostimEvent * (experiment.Photostim & stim_key) - [{k: v} for k, v in _stim_key.items()]).proj())
@classmethod
def _get_trials_include_stim(cls, **kwargs):
# Note: inclusion (attr) is AND - exclusion (_attr) is OR
log.debug('_get_trials_include_stim: {}'.format(kwargs))
restr, _restr = {}, {}
for k, v in kwargs.items():
if k.startswith('_'):
_restr[k[1:]] = v
else:
restr[k] = v
stim_attrs = set(experiment.Photostim.heading.names) - set(experiment.Session.heading.names)
behav_attrs = set(experiment.BehaviorTrial.heading.names)
_stim_key = {k: v for k, v in _restr.items() if k in stim_attrs}
_behav_key = {k: v for k, v in _restr.items() if k in behav_attrs}
stim_key = {k: v for k, v in restr.items() if k in stim_attrs}
behav_key = {k: v for k, v in restr.items() if k in behav_attrs}
return (((experiment.BehaviorTrial & behav_key) - [{k: v} for k, v in _behav_key.items()]) &
(experiment.PhotostimEvent * (experiment.Photostim & stim_key) - [{k: v} for k, v in _stim_key.items()]).proj())
@schema
class UnitPsth(dj.Computed):
definition = """
-> TrialCondition
-> ephys.Unit
---
unit_psth=NULL: longblob
"""
psth_params = {'xmin': -3, 'xmax': 3, 'binsize': 0.04}
def make(self, key):
log.debug('UnitPsth.make(): key: {}'.format(key))
# expand TrialCondition to trials,
trials = TrialCondition.get_trials(key['trial_condition_name'])
# fetch related spike times
q = (ephys.Unit.TrialSpikes & key & trials.proj())
spikes = q.fetch('spike_times')
if len(spikes) == 0:
log.warning('no spikes found for key {} - null psth'.format(key))
self.insert1(key)
return
# compute psth & store.
# XXX: xmin, xmax+bins (149 here vs 150 in matlab)..
# See also [:1] slice in plots..
unit_psth = self.compute_psth(spikes)
self.insert1({**key, 'unit_psth': unit_psth})
@staticmethod
def compute_psth(session_unit_spikes):
spikes = np.concatenate(session_unit_spikes)
xmin, xmax, bins = UnitPsth.psth_params.values()
psth = list(np.histogram(spikes, bins=np.arange(xmin, xmax, bins)))
psth[0] = psth[0] / len(session_unit_spikes) / bins
return np.array(psth)
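    # Note: the returned value packs two unequal-length vectors -- psth[0] holds the
    # trial-averaged firing rate (spikes/s) per bin and psth[1] the bin edges (one element
    # longer); get_plotting_data() below unpacks it as `psth, edges`.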
@classmethod
def get_plotting_data(cls, unit_key, condition_key):
"""
Retrieve / build data needed for a Unit PSTH Plot based on the given
unit condition and included / excluded condition (sub-)variables.
Returns a dictionary of the form:
{
'trials': ephys.Unit.TrialSpikes.trials,
'spikes': ephys.Unit.TrialSpikes.spikes,
'psth': UnitPsth.unit_psth,
'raster': Spike * Trial raster [np.array, np.array]
}
"""
# from sys import exit as sys_exit # NOQA
# from code import interact
# from collections import ChainMap
# interact('unitpsth make', local=dict(ChainMap(locals(), globals())))
trials = TrialCondition.get_func(condition_key)()
unit_psth = (UnitPsth & {**condition_key, **unit_key}).fetch1()['unit_psth']
if unit_psth is None:
raise Exception('No spikes found for this unit and trial-condition')
psth, edges = unit_psth
spikes, trials = (ephys.Unit.TrialSpikes & trials & unit_key).fetch(
'spike_times', 'trial', order_by='trial asc')
raster = [np.concatenate(spikes),
np.concatenate([[t] * len(s)
for s, t in zip(spikes, trials)])]
return dict(trials=trials, spikes=spikes, psth=(psth, edges[1:]), raster=raster)
@schema
class Selectivity(dj.Lookup):
"""
Selectivity lookup values
"""
definition = """
selectivity: varchar(24)
"""
contents = zip(['contra-selective', 'ipsi-selective', 'non-selective'])
@schema
class PeriodSelectivity(dj.Computed):
"""
Multi-trial selectivity for a specific trial subperiod
"""
definition = """
-> ephys.Unit
-> experiment.Period
---
-> Selectivity.proj(period_selectivity='selectivity')
ipsi_firing_rate: float # mean firing rate of all ipsi-trials
contra_firing_rate: float # mean firing rate of all contra-trials
p_value: float # all trial spike rate t-test p-value
"""
alpha = 0.05 # default alpha value
key_source = experiment.Period * (ephys.Unit & 'unit_quality != "all"')
def make(self, key):
'''
Compute Period Selectivity for a given unit.
'''
log.debug('PeriodSelectivity.make(): key: {}'.format(key))
# Verify insertion location is present,
egpos = None
try:
egpos = (ephys.ProbeInsertion.InsertionLocation
* experiment.BrainLocation & key).fetch1()
except dj.DataJointError as e:
if 'exactly one tuple' in repr(e):
log.error('... Insertion Location missing. skipping')
return
# retrieving the spikes of interest,
spikes_q = ((ephys.Unit.TrialSpikes & key)
* (experiment.BehaviorTrial()
& {'task': 'audio delay'}
& {'early_lick': 'no early'}
& {'outcome': 'hit'}) - experiment.PhotostimEvent)
if not spikes_q: # no spikes found
self.insert1({**key, 'period_selectivity': 'non-selective'})
return
# retrieving event times
start_event, start_tshift, end_event, end_tshift = (experiment.Period & key).fetch1(
'start_event_type', 'start_time_shift', 'end_event_type', 'end_time_shift')
start_event_q = {k['trial']: float(k['start_event_time'])
for k in (experiment.TrialEvent & key & {'trial_event_type': start_event}).proj(
start_event_time=f'trial_event_time + {start_tshift}').fetch(as_dict=True)}
end_event_q = {k['trial']: float(k['end_event_time'])
for k in (experiment.TrialEvent & key & {'trial_event_type': end_event}).proj(
end_event_time=f'trial_event_time + {end_tshift}').fetch(as_dict=True)}
cue_event_q = {k['trial']: float(k['trial_event_time'])
for k in (experiment.TrialEvent & key & {'trial_event_type': 'go'}).fetch(as_dict=True)}
# compute spike rate during the period-of-interest for each trial
freq_i, freq_c = [], []
for trial, trial_instruct, spike_times in zip(*spikes_q.fetch('trial', 'trial_instruction', 'spike_times')):
start_time = start_event_q[trial] - cue_event_q[trial]
stop_time = end_event_q[trial] - cue_event_q[trial]
spk_rate = np.logical_and(spike_times >= start_time, spike_times < stop_time).sum() / (stop_time - start_time)
if egpos['hemisphere'] == trial_instruct:
freq_i.append(spk_rate)
else:
freq_c.append(spk_rate)
# and testing for selectivity.
t_stat, pval = sc_stats.ttest_ind(freq_i, freq_c, equal_var=True)
freq_i_m = np.average(freq_i)
freq_c_m = np.average(freq_c)
pval = 1 if np.isnan(pval) else pval
if pval > self.alpha:
pref = 'non-selective'
else:
pref = ('ipsi-selective' if freq_i_m > freq_c_m
else 'contra-selective')
self.insert1({**key, 'p_value': pval,
'period_selectivity': pref,
'ipsi_firing_rate': freq_i_m,
'contra_firing_rate': freq_c_m})
@schema
class UnitSelectivity(dj.Computed):
"""
Multi-trial selectivity at unit level
"""
definition = """
-> ephys.Unit
---
-> Selectivity.proj(unit_selectivity='selectivity')
"""
# Unit Selectivity is computed only for units
# that has PeriodSelectivity computed for "sample" and "delay" and "response"
key_source = (ephys.Unit
& (PeriodSelectivity & 'period = "sample"')
& (PeriodSelectivity & 'period = "delay"')
& (PeriodSelectivity & 'period = "response"'))
def make(self, key):
'''
calculate 'global' selectivity for a unit -
'''
log.debug('UnitSelectivity.make(): key: {}'.format(key))
# fetch region selectivity,
sels = (PeriodSelectivity & key).fetch('period_selectivity')
if (sels == 'non-selective').all():
log.debug('... no UnitSelectivity for unit')
self.insert1({**key, 'unit_selectivity': 'non-selective'})
return
contra_frate, ipsi_frate = (PeriodSelectivity & key & 'period in ("sample", "delay", "response")').fetch(
'contra_firing_rate', 'ipsi_firing_rate')
pref = ('ipsi-selective' if ipsi_frate.mean() > contra_frate.mean() else 'contra-selective')
log.debug('... prefers: {}'.format(pref))
self.insert1({**key, 'unit_selectivity': pref})
def compute_unit_psth(unit_key, trial_keys, per_trial=False):
"""
Compute unit-level psth for the specified unit and trial-set - return (time,)
If per_trial == True, compute trial-level psth - return ((trial x time), time_vec)
"""
q = (ephys.Unit.TrialSpikes & unit_key & trial_keys)
if not q:
return None
xmin, xmax, bin_size = UnitPsth.psth_params.values()
binning = np.arange(xmin, xmax, bin_size)
spikes = q.fetch('spike_times')
if per_trial:
        trial_psth = np.vstack([np.histogram(spike, bins=binning)[0] / bin_size for spike in spikes])
return trial_psth, binning[1:]
else:
spikes = np.concatenate(spikes)
psth, edges = np.histogram(spikes, bins=binning)
psth = psth / len(q) / bin_size
return psth, edges[1:]
def compute_coding_direction(contra_psths, ipsi_psths, time_period=None):
"""
Coding direction here is a vector of length: len(unit_keys)
This coding direction vector (vcd) is the normalized difference between contra-trials firing rate
and ipsi-trials firing rate per unit, within the specified time period
:param contra_psths: unit# x (trial-ave psth, psth_edge)
:param ipsi_psths: unit# x (trial-ave psth, psth_edge)
"""
if not time_period:
contra_tmin, contra_tmax = zip(*((k[1].min(), k[1].max()) for k in contra_psths))
ipsi_tmin, ipsi_tmax = zip(*((k[1].min(), k[1].max()) for k in ipsi_psths))
time_period = max(min(contra_tmin), min(ipsi_tmin)), min(max(contra_tmax), max(ipsi_tmax))
p_start, p_end = time_period
contra_ave_spk_rate = np.array([spk_rate[np.logical_and(spk_edge >= p_start, spk_edge < p_end)].mean()
for spk_rate, spk_edge in contra_psths])
    ipsi_ave_spk_rate = np.array([spk_rate[np.logical_and(spk_edge >= p_start, spk_edge < p_end)].mean()
                                  for spk_rate, spk_edge in ipsi_psths])
import numpy as np
import scipy.linalg
import matplotlib.pyplot as plt
def dlqr(A, B, Q, R, gamma=1):
"""
Solves for the optimal infinite-horizon, discrete-time LQR controller
given linear system (A,B) and cost function parameterized by (Q,R)
"""
P = scipy.linalg.solve_discrete_are(np.sqrt(gamma) * A, B, Q, R / gamma)
F = -gamma * np.matmul(scipy.linalg.inv(gamma * np.matmul(np.matmul(B.T, P), B) + R),
(np.matmul(np.matmul(B.T, P), A)))
return F, P
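# Quick sanity-check sketch of dlqr() (illustrative system, not part of the experiment
# below): F is the state-feedback gain u_t = F x_t and P the cost-to-go matrix.
#   A2 = np.array([[1.0, 1.0], [0.0, 1.0]])     # double integrator
#   B2 = np.array([[0.0], [1.0]])
#   F2, P2 = dlqr(A2, B2, np.eye(2), np.eye(1))
#   assert max(abs(np.linalg.eigvals(A2 + B2 @ F2))) < 1   # closed loop is stable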
DIM = 10
#SEED = 11
#np.random.seed(SEED)
A = np.random.randn(DIM, DIM)
B = np.random.randn(DIM, DIM)
#B = np.identity(DIM)
Q = np.identity(DIM)
R = np.identity(DIM)
K = np.random.randn(DIM, DIM)
K_0 = np.copy(K)
#print(f"K = {K}")
gamma = min(1 / max(np.abs(np.linalg.eig(A + B@K)[0])) ** 2, 1)
gamma_0 = gamma
print(f"gamma = {gamma}")
print("\n")
K_star, P_star = dlqr(A, B, Q, R)
print(f"iter bound: {2*np.trace(P_star)/min(np.linalg.eig(Q)[0]) * np.log(1/np.sqrt(gamma))}")
print(f"iter bound: {2*max(np.linalg.eig(P_star)[0])/min(np.linalg.eig(Q)[0]) * np.log(1/np.sqrt(gamma))}")
print("\n")
i=0
while max(np.abs(np.linalg.eig(A + B@K)[0])) ** 2 >= 1:
i += 1
K, P = dlqr(A, B, Q, R, gamma)
# print(f"K' = {K}")
print("optimal to initial:"
f"{np.abs(max(np.linalg.eig(A + B@K)[0])) / np.abs(max(np.linalg.eig(A + B@K_0)[0]))}")
print("estimate bound:"
f"{np.sqrt(1 - min(np.linalg.eig(Q + K.T@R@K)[0]) / max(np.linalg.eig(P)[0]))}")
print("uniform bound:"
f"{np.sqrt(1-(min(np.linalg.eig(Q)[0])/np.trace(P_star)))}")
print("\n")
gamma = min(1 / max(np.abs(np.linalg.eig(A + B@K)[0])) ** 2, 1)
print(f"gamma = {gamma}")
K_0 = np.copy(K)
#print(f"K* = {K_star}")
print(f"real num iter: {i}")
# %% improvement ratio v bound plot
x = np.linspace(0.01, 1, 500)
true_ratios = np.array(
[np.sqrt(gamma) * max(np.abs(np.linalg.eig(A + B @ dlqr(A, B, Q, R, gamma)[0])[0])) for gamma in x]).squeeze()
temp = []
for gamma in x:
k, p = dlqr(A, B, Q, R, gamma)
temp.append(np.sqrt(1 - min(np.linalg.eig(Q + k.T@R@k)[0]) / max(np.linalg.eig(p)[0])))
est_ratios = np.array(temp).squeeze()
ub_ratios = np.array([np.sqrt(
    1-(min(np.linalg.eig(Q)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# General imports.
import numpy as np
from warnings import warn
from scipy.integrate import AccuracyWarning
from scipy.sparse import find, diags, identity, csr_matrix
from scipy.sparse.linalg import spsolve
from scipy.interpolate import interp1d, RectBivariateSpline
# Plotting.
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# ==============================================================================
# Code for generating indices on the oversampled wavelength grid.
# ==============================================================================
def arange_2d(starts, stops, dtype=None):
"""Create a 2D array containing a series of ranges. The ranges do not have
to be of equal length.
:param starts: start values for each range.
:param stops: end values for each range.
:param dtype: the type of the output values.
:type starts: int or array[int]
:type stops: int or array[int]
:type dtype: str
:returns: out, mask - 2D array of ranges and a mask indicating valid
elements.
:rtype: Tuple(array[int], array[bool])
"""
# Ensure starts and stops are arrays.
starts = np.asarray(starts)
stops = np.asarray(stops)
# Check input for starts and stops is valid.
if (starts.shape != stops.shape) & (starts.shape != ()):
msg = ('Shapes of starts and stops are not compatible, '
'they must either have the same shape or starts must be scalar.')
raise ValueError(msg)
if np.any(stops < starts):
msg = 'stops must be everywhere greater or equal to starts.'
raise ValueError(msg)
# If starts was given as a scalar match its shape to stops.
if starts.shape == ():
starts = starts * np.ones_like(stops)
# Compute the length of each range.
lengths = (stops - starts).astype(int)
# Initialize the output arrays.
nrows = len(stops)
ncols = np.amax(lengths)
out = np.ones((nrows, ncols), dtype=dtype)
mask = np.ones((nrows, ncols), dtype='bool')
# Compute the indices.
for irow in range(nrows):
out[irow, :lengths[irow]] = np.arange(starts[irow], stops[irow])
mask[irow, :lengths[irow]] = False
return out, mask
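# Illustrative example:
#   out, mask = arange_2d([0, 2], [3, 5], dtype=int)
#   # out  -> [[0, 1, 2],
#   #          [2, 3, 4]]
#   # mask -> all False (both ranges have the same length, so no padding is needed)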
# ==============================================================================
# Code for converting to a sparse matrix and back.
# ==============================================================================
def sparse_k(val, k, n_k):
"""
Transform a 2D array `val` to a sparse matrix.
`k` is use for the position in the second axis
of the matrix. The resulting sparse matrix will
have the shape : ((len(k), n_k))
Set k elements to a negative value when not defined
"""
# Length of axis 0
n_i = len(k)
# Get row index
i_k = np.indices(k.shape)[0]
# Take only well defined coefficients
row = i_k[k >= 0]
col = k[k >= 0]
data = val[k >= 0]
mat = csr_matrix((data, (row, col)), shape=(n_i, n_k))
return mat
def unsparse(matrix, fill_value=np.nan):
"""
Convert a sparse matrix to a 2D array of values and a 2D array of position.
Returns
------
out: 2d array
values of the matrix. The shape of the array is given by:
(matrix.shape[0], maximum number of defined value in a column).
col_out: 2d array
position of the columns. Same shape as `out`.
"""
col, row, val = find(matrix.T)
n_row, n_col = matrix.shape
good_rows, counts = np.unique(row, return_counts=True)
# Define the new position in columns
i_col = np.indices((n_row, counts.max()))[1]
i_col = i_col[good_rows]
i_col = i_col[i_col < counts[:, None]]
# Create outputs and assign values
col_out = np.ones((n_row, counts.max()), dtype=int) * -1
col_out[row, i_col] = col
out = np.ones((n_row, counts.max())) * fill_value
out[row, i_col] = val
return out, col_out
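# Illustrative round trip (hypothetical values). A negative entry in `k` marks an
# undefined coefficient and is skipped when building the sparse matrix:
#   val = np.array([[10., 20.], [30., 40.]])
#   k = np.array([[0, 2], [1, -1]])
#   mat = sparse_k(val, k, n_k=3)      # dense form: [[10, 0, 20], [0, 30, 0]]
#   out, col_out = unsparse(mat)       # recovers the defined values and their column positions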
# ==============================================================================
# Code for building wavelength grids.
# ==============================================================================
def get_wave_p_or_m(wave_map):
# TODO rename function?
"""Compute lambda_plus and lambda_minus of pixel map, given the pixel
central value.
:param wave_map: Array of the pixel wavelengths for a given order.
:type wave_map: array[float]
:returns: wave_plus, wave_minus - The wavelength edges of each pixel,
given the central value.
:rtype: Tuple(array[float], array[float])
"""
wave_map = wave_map.T # Simpler to use transpose
    # Initialize arrays.
wave_left = np.zeros_like(wave_map)
wave_right = np.zeros_like(wave_map)
# Compute the change in wavelength.
delta_wave = np.diff(wave_map, axis=0)
# Compute the wavelength values on the left and right edges of each pixel.
wave_left[1:] = wave_map[:-1] + delta_wave/2 # TODO check this logic.
wave_left[0] = wave_map[0] - delta_wave[0]/2
wave_right[:-1] = wave_map[:-1] + delta_wave/2
wave_right[-1] = wave_map[-1] + delta_wave[-1]/2
# The outputs depend on the direction of the spectral axis.
if (wave_right >= wave_left).all():
wave_plus, wave_minus = wave_right.T, wave_left.T
elif (wave_right <= wave_left).all():
wave_plus, wave_minus = wave_left.T, wave_right.T
else:
raise ValueError('Bad pixel values for wavelength.')
return wave_plus, wave_minus
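# Illustrative example with a single spectral row (pixel centers 1, 2, 3):
#   wp, wm = get_wave_p_or_m(np.array([[1., 2., 3.]]))
#   # wp -> [[1.5, 2.5, 3.5]],  wm -> [[0.5, 1.5, 2.5]]
# i.e. each pixel spans half the local wavelength step on either side of its center.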
def oversample_grid(wave_grid, n_os=1):
"""Create an oversampled version of the input 1D wavelength grid.
:param wave_grid: Wavelength grid to be oversampled.
:param n_os: Oversampling factor. If it is a scalar, take the same value for each
interval of the grid. If it is an array, n_os specifies the oversampling
at each interval of the grid, so len(n_os) = len(wave_grid) - 1.
:type wave_grid: array[float]
:type n_os: int or array[int]
:returns: wave_grid_os - The oversampled wavelength grid.
:rtype: array[float]
"""
# Convert n_os to an array.
n_os = np.asarray(n_os)
# n_os needs to have the dimension: len(wave_grid) - 1.
if n_os.ndim == 0:
# A scalar was given, repeat the value.
n_os = np.repeat(n_os, len(wave_grid) - 1)
elif len(n_os) != (len(wave_grid) - 1):
# An array of incorrect size was given.
msg = 'n_os must be a scalar or an array of size len(wave_grid) - 1.'
raise ValueError(msg)
# Grid intervals.
delta_wave = np.diff(wave_grid)
# Initialize the new oversampled wavelength grid.
wave_grid_os = wave_grid.copy()
# Iterate over oversampling factors to generate new grid points.
for i_os in range(1, n_os.max()):
# Consider only intervals that are not complete yet.
mask = n_os > i_os
# Compute the new grid points.
sub_grid = (wave_grid[:-1][mask] + i_os*delta_wave[mask]/n_os[mask])
# Add the grid points to the oversampled wavelength grid.
wave_grid_os = np.concatenate([wave_grid_os, sub_grid])
    # Take only unique values and sort them.
wave_grid_os = np.unique(wave_grid_os)
return wave_grid_os
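# Illustrative example: oversampling each interval of [1, 2, 3] by a factor of 2 inserts
# one midpoint per interval:
#   oversample_grid(np.array([1., 2., 3.]), n_os=2)   # -> [1., 1.5, 2., 2.5, 3.]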
def extrapolate_grid(wave_grid, wave_range, poly_ord):
"""Extrapolate the given 1D wavelength grid to cover a given range of values
by fitting the derivate with a polynomial of a given order and using it to
compute subsequent values at both ends of the grid.
:param wave_grid: Wavelength grid to be extrapolated.
:param wave_range: Wavelength range the new grid should cover.
:param poly_ord: Order of the polynomial used to fit the derivative of
wave_grid.
:type wave_grid: array[float]
:type wave_range: list[float]
:type poly_ord: int
:returns: wave_grid_ext - The extrapolated 1D wavelength grid.
:rtype: array[float]
"""
# Define delta_wave as a function of wavelength by fitting a polynomial.
delta_wave = np.diff(wave_grid)
pars = np.polyfit(wave_grid[:-1], delta_wave, poly_ord)
f_delta = np.poly1d(pars)
# Extrapolate out-of-bound values on the left-side of the grid.
grid_left = []
if wave_range[0] < wave_grid.min():
# Compute the first extrapolated grid point.
grid_left = [wave_grid.min() - f_delta(wave_grid.min())]
# Iterate until the end of wave_range is reached.
while True:
next_val = grid_left[-1] - f_delta(grid_left[-1])
if next_val < wave_range[0]:
break
else:
grid_left.append(next_val)
        # Sort the extrapolated values (and keep only unique ones).
grid_left = np.unique(grid_left)
# Extrapolate out-of-bound values on the right-side of the grid.
grid_right = []
if wave_range[-1] > wave_grid.max():
# Compute the first extrapolated grid point.
grid_right = [wave_grid.max() + f_delta(wave_grid.max())]
# Iterate until the end of wave_range is reached.
while True:
next_val = grid_right[-1] + f_delta(grid_right[-1])
if next_val > wave_range[-1]:
break
else:
grid_right.append(next_val)
# Sort extrapolated values (and keep only unique).
grid_right = np.unique(grid_right)
# Combine the original grid with the extrapolated sections on both sides.
wave_grid_ext = np.concatenate([grid_left, wave_grid, grid_right])
return wave_grid_ext
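# Illustrative sketch (not part of the original module): with a constant (order-0)
# step polynomial the grid is extended toward the requested range in roughly equal steps.
# >>> grid_ext = extrapolate_grid(np.linspace(1.0, 2.0, 11), wave_range=[0.8, 2.2], poly_ord=0)
# grid_ext now reaches approximately from 0.8 to 2.2 in steps of ~0.1.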
from __future__ import print_function
import numpy as np
from scipy.optimize import curve_fit
import os, copy
from openmdao.api import IndepVarComp, Component, Group, Problem
from ccblade.ccblade_component import CCBladePower, CCBladeLoads, CCBladeGeometry
from commonse import gravity, NFREQ
from commonse.csystem import DirectionVector
from commonse.utilities import trapz_deriv, interp_with_deriv
from akima import Akima, akima_interp_with_derivs
import _pBEAM
import _bem # TODO: move to rotoraero
from rotorse import RPM2RS, RS2RPM
from rotorse.rotor_geometry import RotorGeometry, TURBULENCE_CLASS, TURBINE_CLASS, DRIVETRAIN_TYPE
from rotorse.rotor_geometry_yaml import ReferenceBlade
from rotorse.precomp import _precomp
# ---------------------
# Base Components
# ---------------------
class BeamPropertiesBase(Component):
def __init__(self, NPTS):
super(BeamPropertiesBase, self).__init__()
self.add_output('beam:z', val=np.zeros(NPTS), units='m', desc='locations of properties along beam')
self.add_output('beam:EA', val=np.zeros(NPTS), units='N', desc='axial stiffness')
self.add_output('beam:EIxx', val=np.zeros(NPTS), units='N*m**2', desc='edgewise stiffness (bending about :ref:`x-direction of airfoil aligned coordinate system <blade_airfoil_coord>`)')
self.add_output('beam:EIyy', val=np.zeros(NPTS), units='N*m**2', desc='flatwise stiffness (bending about y-direction of airfoil aligned coordinate system)')
self.add_output('beam:EIxy', val=np.zeros(NPTS), units='N*m**2', desc='coupled flap-edge stiffness')
self.add_output('beam:GJ', val=np.zeros(NPTS), units='N*m**2', desc='torsional stiffness (about axial z-direction of airfoil aligned coordinate system)')
self.add_output('beam:rhoA', val=np.zeros(NPTS), units='kg/m', desc='mass per unit length')
self.add_output('beam:rhoJ', val=np.zeros(NPTS), units='kg*m', desc='polar mass moment of inertia per unit length')
self.add_output('beam:Tw_iner', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
self.add_output('beam:x_ec', val=np.zeros(NPTS), units='m', desc='x-distance to elastic center from point about which above structural properties are computed (airfoil aligned coordinate system)')
self.add_output('beam:y_ec', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
self.add_output('beam:flap_iner', val=np.zeros(NPTS), units='kg/m', desc='Section flap inertia about the Y_G axis per unit length.')
self.add_output('beam:edge_iner', val=np.zeros(NPTS), units='kg/m', desc='Section lag inertia about the X_G axis per unit length')
class StrucBase(Component):
def __init__(self, NPTS):
super(StrucBase, self).__init__()
# all inputs/outputs in airfoil coordinate system
self.add_param('Px_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil x-direction at max deflection condition')
self.add_param('Py_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil y-direction at max deflection condition')
self.add_param('Pz_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil z-direction at max deflection condition')
self.add_param('Px_strain', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil x-direction at max strain condition')
self.add_param('Py_strain', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil y-direction at max strain condition')
self.add_param('Pz_strain', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil z-direction at max strain condition')
self.add_param('Px_pc_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil x-direction for deflection used in generated power curve')
self.add_param('Py_pc_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil y-direction for deflection used in generated power curve')
self.add_param('Pz_pc_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil z-direction for deflection used in generated power curve')
self.add_param('xu_strain_spar', val=np.zeros(NPTS), desc='x-position of midpoint of spar cap on upper surface for strain calculation')
self.add_param('xl_strain_spar', val=np.zeros(NPTS), desc='x-position of midpoint of spar cap on lower surface for strain calculation')
self.add_param('yu_strain_spar', val=np.zeros(NPTS), desc='y-position of midpoint of spar cap on upper surface for strain calculation')
self.add_param('yl_strain_spar', val=np.zeros(NPTS), desc='y-position of midpoint of spar cap on lower surface for strain calculation')
self.add_param('xu_strain_te', val=np.zeros(NPTS), desc='x-position of midpoint of trailing-edge panel on upper surface for strain calculation')
self.add_param('xl_strain_te', val=np.zeros(NPTS), desc='x-position of midpoint of trailing-edge panel on lower surface for strain calculation')
self.add_param('yu_strain_te', val=np.zeros(NPTS), desc='y-position of midpoint of trailing-edge panel on upper surface for strain calculation')
self.add_param('yl_strain_te', val=np.zeros(NPTS), desc='y-position of midpoint of trailing-edge panel on lower surface for strain calculation')
self.add_param('Mx_damage', val=np.zeros(NPTS), units='N*m', desc='damage equivalent moments about airfoil x-direction')
self.add_param('My_damage', val=np.zeros(NPTS), units='N*m', desc='damage equivalent moments about airfoil y-direction')
self.add_param('strain_ult_spar', val=0.0, desc='ultimate strain in spar cap')
self.add_param('strain_ult_te', val=0.0, desc='ultimate strain in trailing-edge panels')
self.add_param('gamma_fatigue', val=0.0, desc='safety factor for fatigue')
self.add_param('m_damage', val=0.0, desc='slope of S-N curve for fatigue analysis')
self.add_param('lifetime', val=0.0, units='year', desc='number of years used in fatigue analysis')
self.add_param('beam:z', val=np.zeros(NPTS), units='m', desc='locations of properties along beam')
self.add_param('beam:EA', val=np.zeros(NPTS), units='N', desc='axial stiffness')
self.add_param('beam:EIxx', val=np.zeros(NPTS), units='N*m**2', desc='edgewise stiffness (bending about :ref:`x-direction of airfoil aligned coordinate system <blade_airfoil_coord>`)')
self.add_param('beam:EIyy', val=np.zeros(NPTS), units='N*m**2', desc='flatwise stiffness (bending about y-direction of airfoil aligned coordinate system)')
self.add_param('beam:EIxy', val=np.zeros(NPTS), units='N*m**2', desc='coupled flap-edge stiffness')
self.add_param('beam:GJ', val=np.zeros(NPTS), units='N*m**2', desc='torsional stiffness (about axial z-direction of airfoil aligned coordinate system)')
self.add_param('beam:rhoA', val=np.zeros(NPTS), units='kg/m', desc='mass per unit length')
self.add_param('beam:rhoJ', val=np.zeros(NPTS), units='kg*m', desc='polar mass moment of inertia per unit length')
self.add_param('beam:x_ec', val=np.zeros(NPTS), units='m', desc='x-distance to elastic center from point about which above structural properties are computed (airfoil aligned coordinate system)')
self.add_param('beam:y_ec', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
# outputs
self.add_output('blade_mass', val=0.0, units='kg', desc='mass of one blade')
self.add_output('blade_moment_of_inertia', val=0.0, units='kg*m**2', desc='out of plane moment of inertia of a blade')
self.add_output('freq', val=np.zeros(NFREQ), units='Hz', desc='first nF natural frequencies of blade')
self.add_output('dx_defl', val=np.zeros(NPTS))
import numpy as np
import pandas as pd
import xarray as xr
import Grid
import pf_dynamic_sph
from scipy.io import savemat, loadmat
import os
from timeit import default_timer as timer
import sys
from copy import deepcopy
if __name__ == "__main__":
start = timer()
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (20, 20, 20)
(dx, dy, dz) = (0.2, 0.2, 0.2)
xgrid = Grid.Grid('CARTESIAN_3D')
xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz)
Ntheta = 50
Nk = np.ceil(NGridPoints_desired / Ntheta).astype(int)
theta_max = np.pi
thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True)
k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3)
k_min = 1e-5
kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True)
if dk < k_min:
print('k ARRAY GENERATION ERROR')
kgrid = Grid.Grid("SPHERICAL_2D")
kgrid.initArray_premade('k', kArray)
kgrid.initArray_premade('th', thetaArray)
tMax = 6000; dt = 0.5
# tMax = 500; dt = 0.5
# tMax = 0.5; dt = 0.5
tgrid = np.arange(0, tMax + dt, dt)
gParams = [xgrid, kgrid, tgrid]
NGridPoints = kgrid.size()
print('Total time steps: {0}'.format(tgrid.size))
print('UV cutoff: {0}'.format(k_max))
print('dk: {0}'.format(dk))
print('NGridPoints: {0}'.format(NGridPoints))
# Experimental params
expParams = pf_dynamic_sph.Zw_expParams_2021()
L_exp2th, M_exp2th, T_exp2th = pf_dynamic_sph.unitConv_exp2th(expParams['n0_BEC_scale'], expParams['mB'])
kB = 1.38064852e-23 # Boltzmann constant in J/K
hbar = 1.0555e-34 # reduced Planck's constant (J*s/rad)
aIBexp_Vals = np.array([-1000, -750, -500, -375, -250, -125, -60, -20, 0, 20, 50, 125, 175, 250, 375, 500, 750, 1000])
# n0_BEC = np.array([5.51533197e+19, 5.04612835e+19, 6.04947525e+19, 5.62709096e+19, 6.20802175e+19, 7.12364194e+19, 6.74430590e+19, 6.52854564e+19, 5.74487521e+19, 6.39240612e+19, 5.99344093e+19, 6.12326489e+19, 6.17370181e+19, 5.95291621e+19, 6.09224617e+19, 6.35951755e+19, 5.52594316e+19, 5.94489028e+19]) # peak BEC density (given in in m^(-3))
n0_BEC = np.array([5.50743315e+19, 5.03889459e+19, 6.04081899e+19, 5.61903369e+19, 6.19914061e+19, 7.11346218e+19, 6.73466436e+19, 6.51920977e+19, 5.73665093e+19, 6.38326341e+19, 5.98486416e+19, 6.11450398e+19, 6.16486935e+19, 5.94439691e+19, 6.08352926e+19, 6.35042149e+19, 5.51802931e+19, 5.93638236e+19])
RTF_BEC_X = np.array([8.48469347093994, 8.11111072629368, 8.89071272031954, 8.57125199684266, 9.00767433275159, 9.65522167387697, 9.39241266912852, 9.23956650925869, 8.66153179309422, 9.14179769236378, 8.84900230929328, 8.94534024135962, 8.98248647105392, 8.81871271135454, 8.92241777405925, 9.11802005065468, 8.49295023977057, 8.81270137636933]) # Thomas-Fermi radius of BEC in x-direction (given in um)
RTF_BEC_Y = np.array([11.4543973014280, 11.4485027292274, 12.0994087866866, 11.1987472415996, 12.6147755284164, 13.0408759297917, 12.8251948079726, 12.4963915490121, 11.6984708883771, 12.1884624646191, 11.7981246004719, 11.8796464214276, 12.4136593404667, 12.3220325703494, 12.0104329130883, 12.1756670927480, 10.9661042681457, 12.1803009563806]) # Thomas-Fermi radius of BEC in direction of oscillation (given in um)
RTF_BEC_Z = np.array([70.7057789244995, 67.5925893857806, 74.0892726693295, 71.4270999736888, 75.0639527729299, 80.4601806156414, 78.2701055760710, 76.9963875771558, 72.1794316091185, 76.1816474363648, 73.7416859107773, 74.5445020113302, 74.8540539254493, 73.4892725946212, 74.3534814504937, 75.9835004221224, 70.7745853314214, 73.4391781364111]) # Thomas-Fermi radius of BEC in z-direction (given in um)
Na_displacement = np.array([26.2969729628679, 22.6668334850173, 18.0950989598699, 20.1069898676222, 14.3011351453467, 18.8126473489499, 17.0373115356076, 18.6684373282353, 18.8357213162278, 19.5036039713438, 21.2438389441807, 18.2089748680659, 18.0433963046778, 8.62940156299093, 16.2007030552903, 23.2646987822343, 24.1115616621798, 28.4351972435186]) # initial position of the BEC (in um) -> assumes that lab frame origin is the center of the TiSa beam (to the left of BEC)
K_displacement_raw = np.array([0.473502276902047, 0.395634326123081, 8.66936929134637, 11.1470221226478, 9.34778274195669, 16.4370036199872, 19.0938486958001, 18.2135041439547, 21.9211790347041, 20.6591098913628, 19.7281375591975, 17.5425503131171, 17.2460344933717, 11.7179407507981, 12.9845862662090, 9.18113956217101, 11.9396846941782, 4.72461841775226]) # initial position of the impurity (in um)
K_displacement_scale = np.mean(K_displacement_raw[6:11] / Na_displacement[6:11])
K_displacement = deepcopy(K_displacement_raw); K_displacement[0:6] = K_displacement_scale * Na_displacement[0:6]; K_displacement[11::] = K_displacement_scale * Na_displacement[11::] # in um
K_relPos = K_displacement - Na_displacement # in um
omega_Na = np.array([465.418650581347, 445.155256942448, 461.691943131414, 480.899902898451, 448.655522184374, 465.195338759998, 460.143258369460, 464.565377197007, 465.206177963899, 471.262139163205, 471.260672147216, 473.122081065092, 454.649394420577, 449.679107889662, 466.770887179217, 470.530355145510, 486.615655444221, 454.601540658640])
import torch
import torch.nn as nn
import sys
sys.path.append(r'../input/timmyy/pytorch-image-models-master')
import timm
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import pandas as pd
import matplotlib.pyplot as plt
import os
import cv2
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset
import re
import copy
from torchvision.utils import save_image
import json
import glob
from sklearn.metrics import roc_auc_score
import torch.nn.functional as F
from skimage import color, io, transform
import torchvision
from torchvision import models, datasets, transforms
from torch.optim import lr_scheduler
path = r'../input/11-full-label/ultimate_11_label_teacher_deleted_concatenated.csv'
img_path = r'../input/ranzcr-clip-catheter-line-classification/train'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class student_dataset(Dataset):
def __init__(self, csv_file, root_dir):
self.da_frame = pd.read_csv(csv_file)
self.root_dir = root_dir
def __len__(self):
return len(self.da_frame)
def __getitem__(self, idx):
im = os.path.join(self.root_dir, self.da_frame.iloc[idx, 0] + '.jpg')
im_name = self.da_frame.iloc[idx, 0]
im_label = json.loads(self.da_frame.iloc[idx, 1])
im_label = np.array(im_label)
# -*- coding: iso-8859-15 -*-
#
# This software was written by <NAME> (<NAME>)
# Copyright <NAME>
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Developed by <NAME> (MSSL/UCL)
# uvotpy
# (c) 2009-2017, see Licence
from future.builtins import str
from future.builtins import input
from future.builtins import range
__version__ = '2.9.0 20171209'
import sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
from astropy import wcs
except:
import pyfits
import re
import warnings
try:
import imagestats
except:
import stsci.imagestats as imagestats
import scipy
from scipy import interpolate
from scipy.ndimage import convolve
from scipy.signal import boxcar
from scipy.optimize import leastsq
from scipy.special import erf
from numpy import polyfit, polyval
'''
try:
#from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
import uvotplot
import uvotmisc
import uvotwcs
import rationalfit
import mpfit
import uvotio
except:
pass
'''
from uvotmisc import interpgrid, uvotrotvec, rdTab, rdList
from generate_USNOB1_cat import get_usnob1_cat
import datetime
import os
if __name__ != '__main__':
anchor_preset = list([None,None])
bg_pix_limits = list([-100,-70,70,100])
bg_lower_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
bg_upper_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
offsetlimit = None
#set Global parameters
status = 0
do_coi_correction = True # if not set, disable coi_correction
tempnames = list()
tempntags = list()
cval = -1.0123456789
interactive = True
update_curve = True
contour_on_img = False
give_result = False # with this set, a call to getSpec returns all data
give_new_result = False
use_rectext = False
background_method = 'boxcar' # alternatives 'splinefit' 'boxcar'
background_smoothing = [50,7] # 'boxcar' default smoothing in dispersion and across dispersion in pix
background_interpolation = 'linear'
trackcentroiding = True # default (= False will disable track y-centroiding)
global trackwidth
trackwidth = 2.5 # width of extraction region in sigma (alternative default = 1.0) 2.5 was used for flux calibration.
bluetrackwidth = 1.3 # multiplier width of non-order-overlapped extraction region [not yet active]
write_RMF = False
background_source_mag = 18.0
zeroth_blim_offset = 1.0
coi_half_width = None
slit_width = 200
_PROFILE_BACKGROUND_ = False # start with severe sigma-clip f background, before going to smoothing
today_ = datetime.date.today()
datestring = today_.isoformat()[0:4]+today_.isoformat()[5:7]+today_.isoformat()[8:10]
fileversion=1
calmode=True
typeNone = type(None)
senscorr = True # do sensitivity correction
print(66*"=")
print("uvotpy module uvotgetspec version=",__version__)
print("<NAME> (c) 2009-2017, see uvotpy licence.")
print("please use reference provided at http://github.com/PaulKuin/uvotpy")
print(66*"=","\n")
def getSpec(RA,DEC,obsid, ext, indir='./', wr_outfile=True,
outfile=None, calfile=None, fluxcalfile=None,
use_lenticular_image=True,
offsetlimit=None, anchor_offset=None, anchor_position=[None,None],
background_lower=[None,None], background_upper=[None,None],
background_template=None,
fixed_angle=None, spextwidth=13, curved="update",
fit_second=False, predict2nd=True, skip_field_src=False,
optimal_extraction=False, catspec=None,write_RMF=write_RMF,
get_curve=None,fit_sigmas=True,get_sigma_poly=False,
lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
plot_img=True, plot_raw=True, plot_spec=True, zoom=True, highlight=False,
uvotgraspcorr_on=True, ank_c_0offset = False,
update_pnt=True, ifmotion=False, motion_file=None, anchor_x_offset=False,
replace=None,ifextended=False, singleside_bkg = False, fixwidth = False,
clobber=False, chatter=1):
'''Makes all the necessary calls to reduce the data.
Parameters
----------
ra, dec : float
The Sky position (J2000) in **decimal degrees**
obsid : str
The observation ID number as a **String**. Typically that is
something like "00032331001" and should be part of your
grism filename which is something like "sw00032331001ugu_dt.img"
ext : int
number of the extension to process
kwargs : dict
optional keyword arguments, possible values are:
- **fit_second** : bool
fit the second order. Off since it sometimes causes problems when the
orders overlap completely. Useful for spectra in top part detector
- **background_lower** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **background_upper** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **offsetlimit** : None,int,[center,range]
Default behaviour is to determine automatically any required offset from
the predicted anchor position to the spectrum, and correct for that.
The automated method may fail in the case of a weak spectrum and strong zeroth
or first order next to the spectrum. Two methods are provided:
(1) provide a number which will be used to limit the allowed offset. If
within that limit no peak is identified, the program will stop and require
you to provide a manual offset value. Try small numbers like 1, -1, 3, etc..
(2) if you already know the approximate y-location of the spectrum at the
anchor x-position in the rotated small image strip around the spectrum, you
can give this with a small allowed range for fine tuning as a list of two
parameter values. The first value in the list must be the y-coordinate
(by default the spectrum falls close to y=100 pixels), the second parameter
the allowed adjustment to a peak value in pixels. For example, [105,2].
This will require no further interactive input, and the spectrum will be
extracted using that offset.
- **wheelpos**: {160,200,955,1000}
filter wheel position for the grism filter mode used. Helpful for
forcing Vgrism or UVgrism input when both are present in the directory.
160:UV Clocked, 200:UV Nominal, 955:V clocked, 1000:V nominal
- **zoom** : bool
when False, the whole extracted region is displayed, including zeroth
order when present.
- **clobber** : bool
When True, overwrite earlier output (see also outfile)
- **write_RMF** : bool
When True, write the rmf file (will take extra time due to large matrix operations)
- **use_lenticular_image** : bool
When True and a lenticular image is present, it is used. If False,
the grism image header WCS-S system will be used for the astrometry,
with an automatic call to uvotgraspcorr for refinement.
- **sumimage** : str
Name of a summed image generated using ``sum_Extimage()``; the spectrum will be
extracted from the summed image.
- **wr_outfile** : bool
If False, no output file is written
- **outfile** : path, str
Name of output file, other than automatically generated.
- **calfile** : path, str
calibration file name
- **fluxcalfile** : path, str
flux calibration file name or "CALDB" or None
- **predict2nd** : bool
predict the second order flux from the first. Overestimates in centre a lot.
- **skip_field_src** : bool
if True, do not locate zeroth order positions. Can be used when no internet
connection is available or the USNO-B1 server causes problems.
- **optimal_extraction** : bool, obsolete
Do not use. Better results with other implementation.
- **catspec** : path
optional full path to the catalog specification file for uvotgraspcorr.
- **get_curve** : bool or path
True: activate option to supply the curvature coefficients of all
orders by hand.
path: filename with coefficients of curvature
- **uvotgraspcorr_on** : bool
enable/disable rerun of uvotgraspcorr to update the WCS keywords
- **update_pnt** : bool
enable/disable update of the WCS keywords from the attitude file
(this is done prior to running uvotgraspcorr is that is enabled)
- **fit_sigmas** : bool
fit the sigma of trackwidths if True (not implemented, always on)
- **get_sigma_poly** : bool
option to supply the polynomial for the sigma (not implemented)
- **lfilt1**, **lfilt2** : str
name of the lenticular filter before and after the grism exposure
(now supplied by fileinfo())
- **lfilt1_ext**, **lfilt2_ext** : int
extension of the lenticular filter (now supplied by fileinfo())
- **plot_img** : bool
plot the first figure with the det image
- **plot_raw** : bool
plot the raw spectrum data
- **plot_spec** : bool
plot the flux spectrum
- **highlight** : bool
add contours to the plots to highlight contrasts
- **chatter** : int
verbosity of program
- **set_maglimit** : int
specify a magnitude limit to search for background sources in the USNO-B1 catalog
- **background_template** : numpy 2D array
User provides a background template that will be used instead
determining background. Must be in counts. Size and alignment
must exactly match detector image.
Returns
-------
None, (give_result=True) compounded data (Y0, Y1, Y2, Y3, Y4) which
are explained in the code, or (give_new_result=True) a data dictionary.
Notes
-----
**Quick Start**
`getSpec(ra,dec,obsid, ext,)`
should produce plots and output files
**Which directory?**
The program needs to be started from the CORRECT data directory.
The attitude file [e.g., "sw<OBSID>pat.fits"] is needed!
A link or copy of the attitude file needs to be present in the directory
or "../../auxil/" directory as well.
**Global parameters**
These parameters can be reset, e.g., during an (i)python session, before calling getSpec (see the sketch after the list below).
- **trackwidth** : float
width spectral extraction in units of sigma. The default is trackwidth = 2.5
The alternative default is trackwidth = 1.0 which gives better results for
weak sources, or spectra with nearby contamination. However, the flux
calibration and coincidence-loss correction give currently inconsistent
results. When using trackwidth=1.0, rescale the flux to match trackwidth=2.5
which value was used for flux calibration and coincidence-loss correction.
- **give_result** : bool
set to False since a call to getSpec with this set will return all the
intermediate results. See returns
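A minimal sketch (illustrative only; ra, dec, obsid and ext stand for your own values)
of resetting a global parameter in an interactive session before extraction:
  from uvotpy import uvotgetspec
  uvotgetspec.trackwidth = 1.0   # narrower extraction slit, e.g., for a weak or contaminated spectrum
  uvotgetspec.getSpec(ra, dec, obsid, ext)
Remember that fluxes extracted with trackwidth=1.0 need rescaling to match the
trackwidth=2.5 calibration (see above).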
When the extraction slit is set to be straight (``curved="straight"``), it cuts off the UV part of the
spectrum for spectra located in the top left and bottom right of the image.
History
-------
Version 2011-09-22 NPMK(MSSL) : handle case with no lenticular filter observation
Version 2012-01-15 NPMK(MSSL) : optimal extraction is no longer actively supported until further notice
Version 2013-10-23 NPMK(MSSL) : fixed bug so uvotgraspcorr gives same accuracy as lenticular filter
Version 2014-01-01 NPMK(MSSL) : aperture correction for background added; output dictionary
Version 2014-07-23 NPMK(MSSL) : coi-correction using new calibrared coi-box and factor
Version 2014-08-04 NPMK(MSSL/UCL): expanded offsetlimit parameter with list option to specify y-range.
Version 2015-12-03 NPMK(MSSL/UCL): change input parameter 'get_curve' to accept a file name with coefficients
Version 2016-01-16 NPMK(MSSL/UCL): added options for background; disable automated centroiding of spectrum
Example
-------
from uvotpy.uvotgetspec import getSpec
from uvotpy import uvotgetspec
import os, shutil
indir1 = os.getenv('UVOTPY') +'/test'
indir2 = os.getcwd()+'/test/UVGRISM/00055900056/uvot/image'
shutil.copytree(indir1, os.getcwd()+'/test' )
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=1,indir=indir2, clobber=True )
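A hypothetical variation (the offset and background values are illustrative, not
taken from any calibration) that pins the spectrum location with the list form of
offsetlimit and explicit background regions:
getSpec( 254.7129625, 34.3148667, '00055900056', 1, indir=indir2, offsetlimit=[105,2],
         background_lower=[20,40], background_upper=[20,40], clobber=True )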
'''
# (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
# (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
#
#( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
# (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
#
#fit,(coef0,coef1,coef2,coef3),(bg_zeroth,bg_first,bg_second,bg_third),(borderup,borderdown),apercorr,expospec=Y2
#
#counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
#
#wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
#
# where,
#
#(present0,present1,present2,present3),(q0,q1,q2,q3), \
# (y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
# (y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
# (x,xstart,xend,sp_all,quality,co_back) = fit
#
# dis = dispersion with zero at ~260nm[UV]/420nm[V] ; spnet = background-substracted spectrum from 'spnetimg'
# angle = rotation-angle used to extract 'extimg' ; anker = first order anchor position in DET coordinates
# anker2 = second order anker X,Y position ; anker_field = Xphi,Yphy input angles with respect to reference
# ank_c = X,Y position of axis of rotation (anker) in 'extimg'
# bg = mean background, smoothed, with sources removed
# bg1 = one-sided background, sources removed, smoothed ; bg2 = same for background opposite side
# extimg = image extracted of source and background, 201 pixels wide, all orders.
# spimg = image centered on first order position ; spnetimg = background-subtracted 'spimg'
# offset = offset of spectrum from expected position based on 'anchor' at 260nm[UVG]/420nm[VG], first order
# C_1 = dispersion coefficients [python] first order; C_2 = same for second order
# img = original image ;
# WC_lines positions for selected WC star lines ; hdr = header for image
# m1,m2 = index limits spectrum ; aa = indices spectrum (e.g., dis[aa])
# wav1 = wavelengths for dis[aa] first order (combine with spnet[aa])
#
# when wr_outfile=True the program produces a flux calibrated output file by calling uvotio.
# [fails if output file is already present and clobber=False]
#
# The background must be consistent with the width of the spectrum summed.
from uvotio import fileinfo, rate2flux, readFluxCalFile
from uvotplot import plot_ellipsoid_regions
if (type(RA) == np.ndarray) | (type(DEC) == np.ndarray):
raise IOError("RA, and DEC arguments must be of float type ")
if type(offsetlimit) == list:
if len(offsetlimit) != 2:
raise IOError("offsetlimit list must be [center, distance from center] in pixels")
get_curve_filename = None
a_str_type = type(curved)
if chatter > 4 :
print ("\n*****\na_str_type = ",a_str_type)
print ("value of get_curve = ",get_curve)
print ("type of parameter get_curve is %s\n"%(type(get_curve)) )
print ("type curved = ",type(curved))
if type(get_curve) == a_str_type:
# file name: check this file is present
if os.access(get_curve,os.F_OK):
get_curve_filename = get_curve
get_curve = True
else:
raise IOError(
"ERROR: get_curve *%s* is not a boolean value nor the name of a file that is on the disk."
%(get_curve) )
elif type(get_curve) == bool:
if get_curve:
get_curve_filename = None
print("requires input of curvature coefficients")
elif type(get_curve) == type(None):
get_curve = False
else:
raise IOError("parameter get_curve should by type str or bool, but is %s"%(type(get_curve)))
# check environment
CALDB = os.getenv('CALDB')
if CALDB == '':
print('WARNING: The CALDB environment variable has not been set')
HEADAS = os.getenv('HEADAS')
if HEADAS == '':
print('WARNING: The HEADAS environment variable has not been set')
print('That is needed for the calls to uvot Ftools ')
#SCAT_PRESENT = os.system('which scat > /dev/null')
#if SCAT_PRESENT != 0:
# print('WARNING: cannot locate the scat program \nDid you install WCSTOOLS ?\n')
SESAME_PRESENT = os.system('which sesame > /dev/null')
#if SESAME_PRESENT != 0:
# print 'WARNING: cannot locate the sesame program \nDid you install the cdsclient tools?\n'
# fix some parameters
framtime = 0.0110329 # all grism images are taken in unbinned mode
splineorder=3
getzmxmode='spline'
smooth=50
testparam=None
msg = "" ; msg2 = "" ; msg4 = ""
attime = datetime.datetime.now()
logfile = 'uvotgrism_'+obsid+'_'+str(ext)+'_'+'_'+attime.isoformat()[0:19]+'.log'
if type(fluxcalfile) == bool: fluxcalfile = None
tempnames.append(logfile)
tempntags.append('logfile')
tempnames.append('rectext_spectrum.img')
tempntags.append('rectext')
lfiltnames=np.array(['uvw2','uvm2','uvw1','u','b','v','wh'])
ext_names =np.array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
filestub = 'sw'+obsid
histry = ""
for x in sys.argv: histry += x + " "
Y0 = None
Y2 = None
Y3 = None
Y4 = None
Yfit = {}
Yout = {"coi_level":None} # output dictionary (2014-01-01; replace Y0,Y1,Y2,Y3)
lfilt1_aspcorr = "not initialized"
lfilt2_aspcorr = "not initialized"
qflag = quality_flags()
ZOpos = None
# parameters getSpec()
Yout.update({'indir':indir,'obsid':obsid,'ext':ext})
Yout.update({'ra':RA,'dec':DEC,'wheelpos':wheelpos})
if type(sumimage) == typeNone:
if background_template is not None:
# convert background_template to a dictionary
background_template = {'template':np.asarray(background_template),
'sumimg':False}
try:
ext = int(ext)
except:
print("fatal error in extension number: must be an integer value")
# locate related lenticular images
specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile = \
fileinfo(filestub,ext,directory=indir,wheelpos=wheelpos,chatter=chatter)
# set some flags and variables
lfiltinput = (lfilt1 != None) ^ (lfilt2 != None)
lfiltpresent = lfiltinput | (lfilt1_ != None) | (lfilt2_ != None)
if (type(lfilt1_) == typeNone) & (type(lfilt2_) == typeNone):
# ensure the output is consistent with no lenticular filter solution
use_lenticular_image = False
# translate
filt_id = {"wh":"wh","v":"vv","b":"bb","u":"uu","uvw1":"w1","uvm2":"m2","uvw2":"w2"}
lfiltflag = False
if ((type(lfilt1) == typeNone)&(type(lfilt1_) != typeNone)):
lfilt1 = lfilt1_
lfilt1_ext = lfilt1_ext_
if chatter > 0: print("lenticular filter 1 from search lenticular images"+lfilt1+"+"+str(lfilt1_ext))
lfiltflag = True
lfilt1_aspcorr = None
try:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
except:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img.gz",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
if ((type(lfilt2) == typeNone)&(type(lfilt2_) != typeNone)):
lfilt2 = lfilt2_
lfilt2_ext = lfilt2_ext_
if chatter > 0: print("lenticular filter 2 from search lenticular images"+lfilt2+"+"+str(lfilt2_ext))
lfiltflag = True
lfilt2_aspcorr = None
try:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
except:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img.gz",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
# report
if chatter > 4:
msg2 += "getSpec: image parameter values\n"
msg2 += "ra, dec = (%6.1f,%6.1f)\n" % (RA,DEC)
msg2 += "filestub, extension = %s[%i]\n"% (filestub, ext)
if lfiltpresent & use_lenticular_image:
msg2 += "first/only lenticular filter = "+lfilt1+" extension first filter = "+str(lfilt1_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt1_aspcorr)
if lfilt2_ext != None:
msg2 += "second lenticular filter = "+lfilt2+" extension second filter = "+str(lfilt2_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt2_aspcorr)
if not use_lenticular_image:
msg2 += "anchor position derived without lenticular filter\n"
msg2 += "spectrum extraction preset width = "+str(spextwidth)+'\n'
#msg2 += "optimal extraction "+str(optimal_extraction)+'\n'
hdr = pyfits.getheader(specfile,int(ext))
if chatter > -1:
msg += '\nuvotgetspec version : '+__version__+'\n'
msg += ' Position RA,DEC : '+str(RA)+' '+str(DEC)+'\n'
msg += ' Start date-time : '+str(hdr['date-obs'])+'\n'
msg += ' grism file : '+specfile.split('/')[-1]+'['+str(ext)+']\n'
msg += ' attitude file : '+attfile.split('/')[-1]+'\n'
if lfiltpresent & use_lenticular_image:
if ((lfilt1 != None) & (lfilt1_ext != None)):
msg += ' lenticular file 1: '+lfilt1+'['+str(lfilt1_ext)+']\n'
msg += ' aspcorr: '+lfilt1_aspcorr+'\n'
if ((lfilt2 != None) & (lfilt2_ext != None)):
msg += ' lenticular file 2: '+lfilt2+'['+str(lfilt2_ext)+']\n'
msg += ' aspcorr: '+lfilt2_aspcorr+'\n'
if not use_lenticular_image:
msg += "anchor position derived without lenticular filter\n"
if not 'ASPCORR' in hdr: hdr['ASPCORR'] = 'UNKNOWN'
Yout.update({'hdr':hdr})
tstart = hdr['TSTART']
tstop = hdr['TSTOP']
wheelpos = hdr['WHEELPOS']
expo = hdr['EXPOSURE']
expmap = [hdr['EXPOSURE']]
Yout.update({'wheelpos':wheelpos})
if 'FRAMTIME' not in hdr:
# compute the frametime from the CCD deadtime and deadtime fraction
#deadc = hdr['deadc']
#deadtime = 600*285*1e-9 # 600ns x 285 CCD lines seconds
#framtime = deadtime/(1.0-deadc)
framtime = 0.0110329
hdr.update('framtime',framtime,comment='frame time computed from deadc ')
Yout.update({'hdr':hdr})
if chatter > 1:
print("frame time computed from deadc - added to hdr")
print("with a value of ",hdr['framtime']," ",Yout['hdr']['framtime'])
if not 'detnam' in hdr:
hdr.update('detnam',str(hdr['wheelpos']))
msg += ' exposuretime : %7.1f \n'%(expo)
maxcounts = 1.1 * expo/framtime
if chatter > 0:
msg += ' wheel position : '+str(wheelpos)+'\n'
msg += ' roll angle : %5.1f\n'% (hdr['pa_pnt'])
msg += 'coincidence loss version: 2 (2014-07-23)\n'
msg += '======================================\n'
try:
if ( (np.abs(RA - hdr['RA_OBJ']) > 0.4) ^ (np.abs(DEC - hdr['DEC_OBJ']) > 0.4) ):
sys.stderr.write("\nWARNING: It looks like the input RA,DEC and target position in header are different fields\n")
except (RuntimeError, TypeError, NameError, KeyError):
pass
msg2 += " cannot read target position from header for verification\n"
if lfiltinput:
# the lenticular filter(s) were specified on the command line.
# check that the lenticular image and grism image are close enough in time.
if type(lfilt1_ext) == typeNone:
lfilt1_ext = int(ext)
lpos = np.where( np.array([lfilt1]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile1 = filestub+lnam[0]+'_sk.img'
hdr_l1 = pyfits.getheader(lfile1,lfilt1_ext)
tstart1 = hdr_l1['TSTART']
tstop1 = hdr_l1['TSTOP']
if not ( (np.abs(tstart-tstop1) < 20) ^ (np.abs(tstart1-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile1+" matches the grism image\n")
if lfilt2 != None:
if type(lfilt2_ext) == typeNone:
lfilt2_ext = lfilt1_ext+1
lpos = np.where( np.array([lfilt2]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile2 = filestub+lnam[0]+'_sk.img'
hdr_l2 = pyfits.getheader(lfile2,lfilt2_ext)
tstart2 = hdr_l2['TSTART']
tstop2 = hdr_l2['TSTOP']
if not ( (np.abs(tstart-tstop2) < 20) ^ (np.abs(tstart2-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile2+" matches the grism image\n")
if (not lfiltpresent) | (not use_lenticular_image):
method = "grism_only"
else:
method = None
if not senscorr: msg += "WARNING: No correction for sensitivity degradation applied.\n"
# get the USNO-B1 catalog data for the field, & find the zeroth orders
if (not skip_field_src):
if chatter > 2: print("============== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
# retrieve the input angle relative to the boresight
Xphi, Yphi, date1, msg3, lenticular_anchors = findInputAngle( RA, DEC, filestub, ext,
uvotgraspcorr_on=uvotgraspcorr_on, update_pnt=update_pnt, msg="", \
wheelpos=wheelpos, lfilter=lfilt1, lfilter_ext=lfilt1_ext, \
lfilt2=lfilt2, lfilt2_ext=lfilt2_ext, method=method, \
attfile=attfile, catspec=catspec, indir=indir, chatter=chatter)
Yout.update({"Xphi":Xphi,"Yphi":Yphi})
Yout.update({'lenticular_anchors':lenticular_anchors})
# read the anchor and dispersion out of the wavecal file
anker, anker2, C_1, C_2, angle, calibdat, msg4 = getCalData(Xphi,Yphi,wheelpos, date1, \
calfile=calfile, chatter=chatter)
hdrr = pyfits.getheader(specfile,int(ext))
if (hdrr['aspcorr'] == 'UNKNOWN') & (not lfiltpresent):
msg += "WARNING: No aspect solution found. Anchor uncertainty large.\n"
msg += "first order anchor position on detector in det coordinates:\n"
msg += "anchor1=(%8.2f,%8.2f)\n" % (anker[0],anker[1])
msg += "first order dispersion polynomial (distance anchor, \n"
msg += " highest term first)\n"
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order anchor position on detector in det coordinates:\n"
msg += "anchor2=(%8.2f,%8.2f)\n" % (anker2[0],anker2[1])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
#sys.stderr.write( "first order anchor = %s\n"%(anker))
#sys.stderr.write( "second order anchor = %s\n"%(anker2))
msg += "first order dispersion = %s\n"%(str(C_1))
msg += "second order dispersion = %s\n"%(str(C_2))
if chatter > 1:
sys.stderr.write( "first order dispersion = %s\n"%(str(C_1)) )
sys.stderr.write( "second order dispersion = %s\n"%(str(C_2)) )
msg += "lenticular filter anchor positions (det)\n"
msg += msg3
# override angle
if fixed_angle != None:
msg += "WARNING: overriding calibration file angle for extracting \n\t"\
"spectrum cal: "+str(angle)+'->'+str(fixed_angle)+" \n"
angle = fixed_angle
# override anchor position in det pixel coordinates
if anchor_position[0] != None:
cal_anker = anker
anker = np.array(anchor_position)
msg += "overriding anchor position with value [%8.1f,%8.1f]\n" % (anker[0],anker[1])
anker2 = anker2 -cal_anker + anker
msg += "overriding anchor position 2nd order with value [%8.1f,%8.1f]\n"%(anker2[0],anker2[1])
anker_field = np.array([Xphi,Yphi])
theta=np.zeros(5)+angle # use the angle from first order everywhere.
C_0 = np.zeros(3) # not in calibration file. Use uvotcal/zemax to get.
C_3 = np.zeros(3)
Cmin1 = np.zeros(3)
msg += "field coordinates:\n"
msg += "FIELD=(%9.4f,%9.4f)\n" % (Xphi,Yphi)
# order distance between anchors
dist12 = np.sqrt( (anker[0]-anker2[0])**2 + (anker[1]-anker2[1])**2 )
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
Yout.update({"anker":anker,"anker2":anker2,"C_1":C_1,"C_2":C_2,"theta":angle,"dist12":dist12})
# determine x,y locations of certain wavelengths on the image
# TBD: add curvature
if wheelpos < 500:
wavpnt = np.arange(1700,6800,slit_width)
else:
wavpnt = np.arange(2500,6600,slit_width)
dispnt=pixdisFromWave(C_1,wavpnt) # pixel distance to anchor
if chatter > 0: msg2 += 'first order angle at anchor point: = %7.1f\n'%(angle)
crpix = crpix1,crpix2 = hdr['crpix1'],hdr['crpix2']
crpix = np.array(crpix) # centre of image
ankerimg = anker - np.array([1100.5,1100.5])+crpix
xpnt = ankerimg[0] + dispnt*np.cos((180-angle)*np.pi/180)
ypnt = ankerimg[1] + dispnt*np.sin((180-angle)*np.pi/180)
msg += "1st order anchor on image at (%7.1f,%7.1f)\n"%(ankerimg[0],ankerimg[1])
if chatter > 4: msg += "Found anchor point; now extracting spectrum.\n"
if chatter > 2: print("==========Found anchor point; now extracting spectrum ========")
if type(offsetlimit) == typeNone:
if wheelpos > 300:
offsetlimit = 9
sys.stdout.write("automatically set the value for the offsetlimit = "+str(offsetlimit)+'\n')
# find position zeroth order on detector from WCS-S after update from uvotwcs
#if 'hdr' not in Yout:
# hdr = pyfits.getheader(specfile,int(ext))
# Yout.update({'hdr':hdr})
zero_xy_imgpos = [-1,-1]
if chatter > 1: print("zeroth order position on image...")
try:
wS =wcs.WCS(header=hdr,key='S',relax=True,)
zero_xy_imgpos = wS.wcs_world2pix([[RA,DEC]],0)
print("position not corrected for SIP = ", zero_xy_imgpos[0][0],zero_xy_imgpos[0][1])
zero_xy_imgpos = wS.sip_pix2foc(zero_xy_imgpos, 0)[0]
if chatter > 1:
"print zeroth order position on image:",zero_xy_imgpos
except:
pass
Yout.update({'zeroxy_imgpos':zero_xy_imgpos})
# provide some checks on background inputs:
if background_lower[0] != None:
background_lower = np.abs(background_lower)
if np.sum(background_lower) >= (slit_width-10):
background_lower = [None,None]
msg += "WARNING: background_lower set too close to edge image\n Using default\n"
if background_upper[0] != None:
background_upper = np.abs(background_upper)
if np.sum(background_upper) >= (slit_width-10):
background_upper = [None,None]
msg += "WARNING: background_upper set too close to edge image\n Using default\n"
# in case of summary file:
if (not skip_field_src) & (ZOpos == None):
if chatter > 2: print("DEBUG 802 ================== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
try:
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
except:
if type(sumimage) == typeNone:
print ("exception to call find_zeroth_orders : skip_field_src = ",skip_field_src)
pass
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
if (not skip_field_src):
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
pivot_ori=np.array([(ankerimg)[0],(ankerimg)[1]])
Y_ZOpos={"Xim":Xim,"Yim":Yim,"Xa":Xa,"Yb":Yb,"Thet":Thet,"b2mag":b2mag,
"matched":matched,"ondetector":ondetector}
Yout.update({"ZOpos":Y_ZOpos})
else:
Yout.update({"ZOpos":None})
# find background, extract straight slit spectrum
if chatter > 3 : print ("DEBUG 827 compute background")
if sumimage != None:
# initialize parameters for extraction summed extracted image
print('reading summed image file : '+sumimage)
print('ext label for output file is set to : ', ext)
Y6 = sum_Extimage (None, sum_file_name=sumimage, mode='read')
extimg, expmap, exposure, wheelpos, C_1, C_2, dist12, anker, \
(coef0, coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef), hdr = Y6
if background_template != None:
background_template = {'extimg': background_template,
'sumimg': True}
if (background_template['extimg'].size != extimg.size):
print("ERROR")
print("background_template.size=",background_template['extimg'].size)
print("extimg.size=",extimg.size)
raise IOError("The template does not match the sumimage dimensions")
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
print("first order anchor = ",anker)
print("first order dispersion = %s"%(str(C_1)))
print("second order dispersion = %s"%(str(C_2)))
tstart = hdr['tstart']
ank_c = [100,500,0,2000]
if type(offsetlimit) == typeNone:
offset = 0
elif type(offsetlimit) == list:
offset = offsetlimit[0]-96
ank_c[0] = offsetlimit[0]
else:
offset = offsetlimit # for sumimage used offsetlimit to set the offset
ank_c[0] = 96+offsetlimit
dis = np.arange(-500,1500)
img = extimg
# get background
bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra = findBackground(extimg,
background_lower=background_lower,
background_upper=background_upper,)
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
skip_field_src = True
spnet = bg1 # placeholder
expo = exposure
maxcounts = exposure/0.01
anker2 = anker + [dist12,0]
spimg,spnetimg,anker_field = None, None, (0.,0.)
m1,m2,aa,wav1 = None,None,None,None
if type(outfile) == typeNone:
outfile='sum_image_'
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef} )
Yout.update({"anker":anker,"anker2":None,
"C_1":C_1,"C_2":C_2,
"Xphi":0.0,"Yphi":0.0,
"wheelpos":wheelpos,"dist12":dist12,
"hdr":hdr,"offset":offset})
Yout.update({"background_1":bg1,"background_2":bg2})
dropout_mask = None
Yout.update({"zeroxy_imgpos":[1000,1000]})
else:
# default extraction
if chatter > 2 : print ("DEBUG 894 default extraction")
# start with a quick straight slit extraction
exSpIm = extractSpecImg(specfile,ext,ankerimg,angle,spwid=spextwidth,
background_lower=background_lower, background_upper=background_upper,
template = background_template, x_offset = anchor_x_offset, ank_c_0offset=ank_c_0offset,
offsetlimit=offsetlimit, replace=replace, chatter=chatter, singleside_bkg=singleside_bkg)
dis = exSpIm['dis']
spnet = exSpIm['spnet']
bg = exSpIm['bg']
bg1 = exSpIm['bg1']
bg2 = exSpIm['bg2']
bgsig = exSpIm['bgsigma']
bgimg = exSpIm['bgimg']
bg_limits_used = exSpIm['bg_limits_used']
bgextra = exSpIm['bgextras']
extimg = exSpIm['extimg']
spimg = exSpIm['spimg']
spnetimg = exSpIm['spnetimg']
offset = exSpIm['offset']
ank_c = exSpIm['ank_c']
if background_template != None:
background_template ={"extimg":exSpIm["template_extimg"]}
Yout.update({"template":exSpIm["template_extimg"]})
if exSpIm['dropouts']:
dropout_mask = exSpIm['dropout_mask']
else: dropout_mask = None
Yout.update({"background_1":bg1,"background_2":bg2})
#msg += "1st order anchor offset from spectrum = %7.1f\n"%(offset)
#msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],ank_c[0])
calibdat = None # free the memory
if chatter > 2: print("============ straight slit extraction complete =================")
if np.max(spnet) < maxcounts: maxcounts = 2.0*np.max(spnet)
# initial limits spectrum (pixels)
m1 = ank_c[1]-400
if wheelpos > 500: m1 = ank_c[1]-370
if m1 < 0: m1 = 0
if m1 < (ank_c[2]+30): m1 = ank_c[2]+30
m2 = ank_c[1]+2000
if wheelpos > 500: m2 = ank_c[1]+1000
if m2 >= len(dis): m2 = len(dis)-2
if m2 > (ank_c[3]-40): m2=(ank_c[3]-40)
aa = list(range(int(m1),int(m2)))
wav1 = polyval(C_1,dis[aa])
# get grism det image
img = pyfits.getdata(specfile, ext)
if isinstance(replace,np.ndarray):
img = replace
try:
offset = np.asscalar(offset)
except:
pass
Yout.update({"offset":offset})
Zbg = bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra
net = extimg-bgextra[-1]
var = extimg.copy()
dims = np.asarray( img.shape )
dims = np.array([dims[1],dims[0]])
dims2 = np.asarray(extimg.shape)
dims2 = np.array([dims2[1],dims2[0]])
msg += "Lower background from y = %i pix\nLower background to y = %i pix\n" % (bg_limits_used[0],bg_limits_used[1])
msg += "Upper background from y = %i pix\nUpper background to y = %i pix\n" % (bg_limits_used[2],bg_limits_used[3])
msg += "TRACKWID =%4.1f\n" % (trackwidth)
# collect some results:
if sumimage == None:
Y0 = (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
(Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra
else:
Y0 = None, None, None, (dist12, None, None), expmap, bgimg, bg_limits_used, bgextra
angle = 0.0
# curvature from input (TBD how - placeholder with raw_input)
# choose input coef or pick from plot
# choose order to do it for
if (get_curve & interactive) | (get_curve & (get_curve_filename != None)):
if chatter > 3 : print ("DEBUG 978 get user-provided curve coefficients and extract spectrum")
spextwidth = None
# grab coefficients
poly_1 = None
poly_2 = None
poly_3 = None
if get_curve_filename == None:
try:
poly_1 = eval(input("give coefficients of first order polynomial array( [X^3,X^2,X,C] )"))
poly_2 = eval(input("give coefficients of second order polynomial array( [X^2,X,C] )"))
poly_3 = eval(input("give coefficients of third order polynomial array( [X,C] )"))
except:
print("failed")
if (type(poly_1) != list) | (type(poly_2) != list) | (type(poly_3) != list):
print("poly_1 type = ",type(poly_1))
print("poly_2 type = ",type(poly_2))
print("poly_3 type = ",type(poly_3))
raise IOError("the coefficients must be a list")
poly_1 = np.asarray(poly_1)
poly_2 = np.asarray(poly_2)
poly_3 = np.asarray(poly_3)
else:
try:
curfile = rdList(get_curve_filename)
poly_1 = np.array(curfile[0][0].split(','),dtype=float)
poly_2 = np.array(curfile[1][0].split(','),dtype=float)
poly_3 = np.array(curfile[2][0].split(','),dtype=float)
except:
print("There seems to be a problem when readin the coefficients out of the file")
print("The format is a list of coefficient separated by comma's, highest order first")
print("The first line for the first order")
print("The second line for the secons order")
print("The third line for the third order")
print("like, \n1.233e-10,-7.1e-7,3.01e-3,0.0.\n1.233e-5,-2.3e-2,0.03.0\n1.7e-1,0.9\n")
print(get_curve_filename)
print(curfile)
print(poly_1)
print(poly_2)
print(poly_3)
raise IOError("ERROR whilst reading curvature polynomial from file\n")
print("Curvature coefficients were read in...\npoly_1: %s \npoly_2: %s \npoly_3: %s \n"%
(poly_1,poly_2,poly_3))
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved \
= curved_extraction(
extimg, ank_c, anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
predict_second_order=predict2nd,
background_template=background_template,
angle=angle, offset=offset,
poly_1=poly_1, poly_2=poly_2, poly_3=poly_3,
msg=msg, curved=curved,
outfull=True, expmap=expmap,
fit_second=fit_second,
fit_third=fit_second,
C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
# fit_sigmas parameter needs passing
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),(
x,xstart,xend,sp_all,quality,co_back) = fitorder
# update the anchor y-coordinate
if chatter > 3 : print ("DEBUG 1048 update anchor coordinate\noriginal ank_c=%s\ny1=%s"%(ank_c,y1))
ank_c[0] = y1[int(ank_c[1])]
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
# curvature from calibration
if spextwidth != None:
if chatter > 3 : print ("DEBUG 1067 get curve coefficients from cal file and extract spectrum ")
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown) , apercorr, expospec, msg, curved \
= curved_extraction(
extimg,ank_c,anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
background_lower=background_lower,
background_upper=background_upper, \
background_template=background_template,\
angle=angle, offset=offset,
outfull=True, expmap=expmap,
msg = msg, curved=curved,
fit_second=fit_second,
fit_third=fit_second, C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
(present0,present1,present2,present3),(q0,q1,q2,q3), \
(y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
(y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
(x,xstart,xend,sp_all,quality,co_back) = fitorder
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
ank_c[0] = y1[int(ank_c[1])]
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
msg += "orders present:"
if present0: msg += "0th order, "
if present1: msg += "first order"
if present2: msg += ", second order"
if present3: msg += ", third order "
print('DEBUG 1224 first order curvature coef1 =', coef1)
print('RA, DEC =', RA, DEC)
print('anker =', anker)
print('ank_c =', ank_c)
msg += '\nparametrized order curvature:\n'
if present0:
for k in range(len(coef0)):
msg += "COEF0_"+str(k)+"=%12.4e\n" % (coef0[k])
if present1:
for k in range(len(coef1)):
msg += "COEF1_"+str(k)+"=%12.4e\n" % (coef1[k])
if present2:
for k in range(len(coef2)):
msg += "COEF2_"+str(k)+"=%12.4e\n" % (coef2[k])
if present3:
for k in range(len(coef3)):
msg += "COEF3_"+str(k)+"=%12.4e\n" % (coef3[k])
msg += '\nparametrized width slit:\n'
if present0:
for k in range(len(sig0coef)):
msg += "SIGCOEF0_"+str(k)+"=%12.4e\n" % (sig0coef[k])
if present1:
for k in range(len(sig1coef)):
msg += "SIGCOEF1_"+str(k)+"=%12.4e\n" % (sig1coef[k])
if present2:
for k in range(len(sig2coef)):
msg += "SIGCOEF2_"+str(k)+"=%12.4e\n" % (sig2coef[k])
if present3:
for k in range(len(sig3coef)):
msg += "SIGCOEF3_"+str(k)+"=%12.4e\n" % (sig3coef[k])
if chatter > 3 : print ("DEBUG 1142 done spectral extraction, now calibrate")
offset = ank_c[0]-slit_width/2
msg += "best fit 1st order anchor offset from spectrum = %7.1f\n"%(offset)
msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],y1[int(ank_c[1])])
msg += msg4
Yout.update({"offset":offset})
#2012-02-20 moved updateFitorder to curved_extraction
#if curved == "update":
# fit = fitorder2
#else:
# fit = fitorder
fit = fitorder
if optimal_extraction:
# development dropped, since mod8 causes slit width oscillations
# also requires a good second order flux and coi calibration for
# possible further development of order splitting.
# result in not consistent now.
print("Starting optimal extraction: This can take a few minutes ......\n\t "\
"........\n\t\t .............")
Y3 = get_initspectrum(net,var,fit,160,ankerimg,C_1=C_1,C_2=C_2,dist12=dist12,
predict2nd=predict2nd,
chatter=1)
counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
# need to test that C_2 is valid here
if predict2nd:
Y4 = predict_second_order(dis,(sp_first-bg_first), C_1,C_2, dist12, quality,dlim1L, dlim1U,wheelpos)
wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
# retrieve the effective area
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=1,arf=fluxcalfile,msg=msg,chatter=chatter)
EffArea1 = Y7[:-1]
msg = Y7[-1]
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=2,arf=None,msg=msg,chatter=chatter)
if type(Y7) == tuple:
EffArea2 = Y7[:-1]
else:
if type(Y7) != typeNone: msg = Y7
EffArea2 = None
# note that the output differs depending on parameters given, i.e., arf, anchor
Yout.update({"effarea1":EffArea1,"effarea2":EffArea2})
if interactive:
import matplotlib.pyplot as plt
if (plot_img) & (sumimage == None):
#plt.winter()
# make plot of model on image [figure 1]
#xa = np.where( (dis < 1400) & (dis > -300) )
bga = bg.copy()
fig1 = plt.figure(1); plt.clf()
img[img <=0 ] = 1e-16
plt.imshow(np.log(img),vmin=np.log(bga.mean()*0.1),vmax=np.log(bga.mean()*4))
levs = np.array([5,15,30,60,120,360]) * bg.mean()
if highlight: plt.contour(img,levels=levs)
# plot yellow wavelength marker
# TBD : add curvature
plt.plot(xpnt,ypnt,'+k',markersize=14)
if not skip_field_src:
plot_ellipsoid_regions(Xim,Yim,
Xa,Yb,Thet,b2mag,matched,ondetector,
pivot_ori,pivot_ori,dims,17.,)
if zoom:
#plt.xlim(np.max(np.array([0.,0.])),np.min(np.array([hdr['NAXIS1'],ankerimg[0]+400])))
#plt.ylim(np.max(np.array([0.,ankerimg[1]-400 ])), hdr['NAXIS2'])
plt.xlim(0,2000)
plt.ylim(0,2000)
else:
plt.xlim(0,2000)
plt.ylim(0,2000)
plt.savefig(indir+'/'+obsid+'_map.png',dpi=150)
#plt.show()
plt.close()
if (plot_raw):
#plt.winter()
nsubplots = 2
#if not fit_second: nsubplots=3
# make plot of spectrum [figure 2]
fig2 = plt.figure(2); plt.clf()
plt.subplots_adjust(top=1,hspace=0, wspace=0)
# image slice
ax21 = plt.subplot(nsubplots,1,1)
ac = -ank_c[1]
net[net<=0.] = 1e-16
#plt.imshow(np.log10(net),vmin=-0.8,vmax=0.8, #~FIXME:
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower',cmap=plt.cm.winter)
plt.imshow(np.log10(net),vmin=-10,vmax=2,
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')#,cmap=plt.cm.winter)
#plt.imshow(extimg,vmin=0,vmax=50,
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower')#,cmap=plt.cm.winter)
if highlight:
plt.contour(np.log10(net),levels=[1,1.3,1.7,2.0,3.0],
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')
#plt.imshow( extimg,vmin= (bg1.mean())*0.1,vmax= (bg1.mean()+bg1.std())*2, extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]) )
#levels = np.array([5,10,20,40,70,90.])
#levels = spnet[ank_c[2]:ank_c[3]].max() * levels * 0.01
#if highlight: plt.contour(net,levels=levels,extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]))
# cross_section_plot:
cp2 = cp2/np.max(cp2)*100
#plt.plot(ac+cp2+ank_c[1],np.arange(len(cp2)),'k',lw=2,alpha=0.6,ds='steps') #~TODO:
# plot zeroth orders
if not skip_field_src:
pivot= np.array([ank_c[1],ank_c[0]-offset])
#pivot_ori=ankerimg
mlim = 17.
if wheelpos > 500: mlim = 15.5
plot_ellipsoid_regions(Xim,Yim,Xa,Yb,Thet,b2mag,
matched,ondetector,
pivot,pivot_ori,
dims2,mlim,
img_angle=angle-180.0,ax=ax21)
# plot line on anchor location
#plt.plot([ac+ank_c[1],ac+ank_c[1]],[0,slit_width],'k',lw=2)
plt.plot(0,ank_c[0],'kx',markersize=5) #~TODO:
# plot position centre of orders
#if present0: plt.plot(ac+q0[0],y0[q0[0]],'k--',lw=1.2)
#plt.plot( ac+q1[0],y1[q1[0]],'k--',lw=1.2)
#if present2: plt.plot(ac+q2[0],y2[q2[0]],'k--',alpha=0.6,lw=1.2)
#if present3: plt.plot(ac+q3[0],y3[q3[0]],'k--',alpha=0.3,lw=1.2)
# plot borders slit region
if present0:
plt.plot(ac+q0[0],borderup [0,q0[0]],'r-')
plt.plot(ac+q0[0],borderdown[0,q0[0]],'r-')
if present1:
plt.plot(ac+q1[0],borderup [1,q1[0]],'r-',lw=1.2)
plt.plot(ac+q1[0],borderdown[1,q1[0]],'r-',lw=1.2)
if present2:
plt.plot(ac+q2[0],borderup [2,q2[0]],'r-',alpha=0.6,lw=1)
plt.plot(ac+q2[0],borderdown[2,q2[0]],'r-',alpha=0.6,lw=1)
if present3:
plt.plot(ac+q3[0],borderup [3,q3[0]],'r-',alpha=0.3,lw=1.2)
plt.plot(ac+q3[0],borderdown[3,q3[0]],'r-',alpha=0.3,lw=1.2)
# plot limits background
plt_bg = np.ones(len(q1[0]))
if (background_lower[0] == None) & (background_upper[0] == None):
background_lower = [0,50] ; background_upper = [slit_width-50,slit_width]
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
else:
if background_lower[0] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[1]),'-k',lw=1.5 )
elif background_lower[1] != None:
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
if background_upper[1] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[1]),'-k',lw=1.5 )
elif background_upper[0] != None:
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
# rescale, title
plt.ylim(0,slit_width)
#plt.ylim(50,150)
if not zoom:
xlim1 = ac+ank_c[2]
xlim2 = ac+ank_c[3]
else:
xlim1 = max(ac+ank_c[2], -420)
xlim2 = min(ac+ank_c[3],1400)
plt.xlim(xlim1,xlim2)
plt.title(obsid+'+'+str(ext))
# first order raw data plot
ax22 = plt.subplot(nsubplots,1,2)
plt.rcParams['legend.fontsize'] = 'small'
if curved == 'straight':
p1, = plt.plot( dis[ank_c[2]:ank_c[3]], spnet[ank_c[2]:ank_c[3]],'k',
ds='steps',lw=0.5,alpha=0.5,label='straight')
p2, = plt.plot( dis[ank_c[2]:ank_c[3]],
spextwidth*(bg1[ank_c[2]:ank_c[3]]+bg2[ank_c[2]:ank_c[3]])*0.5,
'b',alpha=0.5,label='background')
plt.legend([p1,p2],['straight','background'],loc=0,)
if curved != "straight":
p3, = plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'r',ds='steps',label='spectrum')
plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'k',alpha=0.2,ds='steps',label='_nolegend_')
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.5,lw=1.1,ds='steps',label='background')
# bad pixels:
qbad = np.where(quality[q1[0]] > 0)
p4, = plt.plot(x[qbad],(sp_first-bg_first)[qbad],'xk',markersize=4)
#p7, = plt.plot(x[q1[0]],(bg_first)[q1[0]],'r-',alpha=0.3,label='curve_bkg')
# annotation
#plt.legend([p3,p4,p7],['spectrum','suspect','background'],loc=0,)
plt.legend([p3,p7],['spectrum','background'],loc=0,)
maxbg = np.max(bg_first[q1[0]][np.isfinite(bg_first[q1[0]])])
topcnt = 1.2 * np.max([np.max(spnet[q1[0]]),maxbg, np.max((sp_first-bg_first)[q1[0]])])
plt.ylim(np.max([ -20, np.min((sp_first-bg_first)[q1[0]])]), np.min([topcnt, maxcounts]))
if optimal_extraction:
p5, = plt.plot(x[q1[0]],counts[1,q1[0]],'g',alpha=0.5,ds='steps',lw=1.2,label='optimal' )
p6, = plt.plot(x[q1[0]],counts[1,q1[0]],'k',alpha=0.5,ds='steps',lw=1.2,label='_nolegend_' )
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.7,lw=1.1,ds='steps',label='background')
plt.legend([p3,p5,p7],['spectrum','optimal','background'],loc=0,)
topcnt = 1.2 * np.max((sp_first-bg_first)[q1[0]])
ylim1,ylim2 = -10, np.min([topcnt, maxcounts])
plt.ylim( ylim1, ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('1st order counts')
'''
# plot second order
ax23 = plt.subplot(nsubplots,1,3)
plt.rcParams['legend.fontsize'] = 'small'
#plt.xlim(ank_c[2],ank_c[3])
if fit_second:
if curved != 'straight':
p1, = plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'r',label='spectrum')
plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'k',alpha=0.2,label='_nolegend_')
p7, = plt.plot(x[q2[0]],(bg_second)[q2[0]],'y',alpha=0.7,lw=1.1,label='background')
qbad = np.where(quality[q2[0]] > 0)
p2, = plt.plot(x[qbad],(sp_second-bg_second)[qbad],'+k',alpha=0.3,label='suspect')
plt.legend((p1,p7,p2),('spectrum','background','suspect'),loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
if optimal_extraction:
p3, = plt.plot(x[q2[0]],counts[2,q2[0]],'g',alpha=0.5,ds='steps',label='optimal' )
plt.legend((p1,p7,p2,p3),('spectrum','background','suspect','optimal',),loc=2)
#plt.ylim(np.max([ -10,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
if predict2nd :
p4, = plt.plot(dis2p+dist12,flux2p, ds='steps',label='predicted')
p5, = plt.plot(dis2p[np.where(qual2p != 0)]+dist12,flux2p[np.where(qual2p != 0)],'+k',label='suspect',markersize=4)
if optimal_extraction & fit_second:
plt.legend((p1,p2,p3,p4,p5),('curved','suspect','optimal','predicted','suspect'),loc=2)
#plt.ylim(np.max([ -100,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
elif optimal_extraction:
plt.legend((p1,p7,p4,p5),('curved','background','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
elif fit_second:
plt.legend((p1,p2,p4,p5),('curved','suspect','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
else:
plt.legend((p4,p5),('predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('2nd order counts')
'''
'''
if fit_second:
ax24 = plt.subplot(nsubplots,1,4)
plt.rcParams['legend.fontsize'] = 'small'
if (len(q3[0]) > 1) & (curved != "xxx"):
p1, = plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'r',label='spectrum')
plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'k',alpha=0.2,label='_nolegend_')
qbad = np.where(quality[q3[0]] > 0)
p2, = plt.plot(x[qbad],(sp_third-bg_third)[qbad],'xk',alpha=0.3,label='suspect')
p3, = plt.plot(x[q3[0]],bg_third[q3[0]],'y',label='background')
plt.legend([p1,p3,p2],['spectrum','background','suspect'],loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q3[0]])]),\
np.min([np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
if optimal_extraction:
p4, = plt.plot(x[q3[0]],counts[3,q3[0]],'b',alpha=0.5,ds='steps',label='optimal' )
plt.legend([p1,p3,p2,p4],['spectrum','background','suspect','optimal',],loc=2)
#plt.ylim(np.max([ -100,np.min(counts[3,q3[0]]), np.min((sp_second-bg_second)[q3[0]])]),\
# np.min([np.max(counts[3,q3[0]]), np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel(u'3rd order counts')
plt.xlabel(u'pixel distance from anchor position')
'''
plt.savefig(indir+'/'+obsid+'_count.png',dpi=150)
#plt.show()
if (plot_spec):
#plt.winter()
# NEED the flux cal applied!
nsubplots = 1
if not fit_second:
nsubplots = 1
fig3 = plt.figure(3)
plt.clf()
wav1 = polyval(C_1,x[q1[0]])
ax31 = plt.subplot(nsubplots,1,1)
if curved != "xxx":
# PSF aperture correction applies on net rate, but background
# needs to be corrected to default trackwidth linearly
rate1 = ((sp_first[q1[0]]-bg_first[q1[0]] ) * apercorr[1,[q1[0]]]
/expospec[1,[q1[0]]]).flatten()
bkgrate1 = ((bg_first)[q1[0]] * (2.5/trackwidth)
/expospec[1,[q1[0]]]).flatten()
print("computing flux for plot; frametime =",framtime)
flux1,wav1,coi_valid1 = rate2flux(wav1,rate1, wheelpos,
bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]],
#sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, effarea1=EffArea1,
spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker,
#option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
#flux1_err = 0.5*(rate2flux(,,rate+err,,) - rate2flux(,,rate-err,,))
p1, = plt.plot(wav1[np.isfinite(flux1)],flux1[np.isfinite(flux1)],
color='darkred',label=u'curved')
p11, = plt.plot(wav1[np.isfinite(flux1)&(coi_valid1==False)],
flux1[np.isfinite(flux1)&(coi_valid1==False)],'.',
color='lawngreen',
label="too bright")
# PROBLEM quality flags !!!
qbad1 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] < 16))
qbad2 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] == qflag.get("bad")))
plt.legend([p1,p11],[u'calibrated spectrum',u'too bright - not calibrated'])
if len(qbad2[0]) > 0:
p2, = plt.plot(wav1[qbad2],flux1[qbad2],
'+k',markersize=4,label=u'bad data')
plt.legend([p1,p2],[u'curved',u'bad data'])
plt.ylabel(u'1st order flux $(erg\ cm^{-2} s^{-1} \AA^{-1})$')
# find reasonable limits flux
get_flux_limit = flux1[int(len(wav1)*0.3):int(len(wav1)*0.7)]
get_flux_limit[get_flux_limit==np.inf] = np.nan
get_flux_limit[get_flux_limit==-np.inf]= np.nan
qf = np.nanmax(get_flux_limit)
if qf > 2e-12:
qf = 2e-12
plt.ylim(0.001*qf,1.2*qf)
plt.xlim(1600,6000)
if optimal_extraction: # no longer supported (2013-04-24)
print("OPTIMAL EXTRACTION IS NO LONGER SUPPORTED")
wav1 = np.polyval(C_1,x[q1[0]])
#flux1 = rate2flux(wav1, counts[1,q1[0]]/expo, wheelpos, spectralorder=1, arf1=fluxcalfile)
flux1,wav1,coi_valid1 = rate2flux(wav1,counts[1,q1[0]]/expo, wheelpos, bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]], #sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker, #option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
p3, = plt.plot(wav1, flux1,'g',alpha=0.5,ds='steps',lw=2,label='optimal' )
p4, = plt.plot(wav1,flux1,'k',alpha=0.5,ds='steps',lw=2,label='_nolegend_' )
#plt.legend([p1,p2,p3],['curved','suspect','optimal'],loc=0,)
plt.legend([p1,p3],['curved','optimal'],loc=0,)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
plt.ylabel(u'1st order count rate')
plt.xlim(np.min(wav1)-10,np.max(wav1))
plt.title(obsid+'+'+str(ext))
'''
if fit_second:
ax32 = plt.subplot(nsubplots,1,2)
plt.plot([1650,3200],[0,1])
plt.text(2000,0.4,'NO SECOND ORDER DATA',fontsize=16)
if curved != 'xxx':
wav2 = polyval(C_2,x[q2[0]]-dist12)
rate2 = ((sp_second[q2[0]]-bg_second[q2[0]])*
apercorr[2,[q2[0]]].flatten()/expospec[2,[q2[0]]].flatten() )
bkgrate2 = ((bg_second)[q2[0]] * (2.5/trackwidth)
/expospec[2,[q2[0]]]).flatten()
flux2,wav2,coi_valid2 = rate2flux(wav2, rate2, wheelpos,
bkgrate=bkgrate2,
co_sprate = (co_second[q2[0]]/expospec[2,[q2[0]]]).flatten(),
co_bgrate = (co_back [q2[0]]/expospec[2,[q2[0]]]).flatten(),
pixno=x[q2[0]],
arf1=fluxcalfile, arf2=None,
frametime=framtime, effarea2=EffArea2,
spectralorder=2,swifttime=tstart,
anker=anker2,
debug=False,chatter=1)
#flux1_err = rate2flux(wave,rate_err, wheelpos, spectralorder=1,)
plt.cla()
print('#############################')
print(wav2[100],flux2[100],wav2,flux2)
p1, = plt.plot(wav2,flux2,'r',label='curved')
plt.plot(wav2,flux2,'k',alpha=0.2,label='_nolegend_')
qbad1 = np.where((quality[np.array(x[q2[0]],dtype=int)] > 0) & (quality[np.array(x[q2[0]],dtype=int)] < 16))
p2, = plt.plot(wav2[qbad1],flux2[qbad1],'+k',markersize=4,label='suspect data')
plt.legend(['uncalibrated','suspect data'])
plt.ylabel(u'estimated 2nd order flux')
plt.xlim(1600,3200)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
if np.sum(qf[0]) > 0:
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
#else: plt.ylim(1e-16,2e-12)
else: plt.ylim(1e-12,1e-11)
# final fix to limits of fig 3,1
y31a,y31b = ax31.get_ylim()
setylim = False
if y31a < 1e-16:
y31a = 1e-16
setylim = True
if y31b > 1e-12:
y31b = 1e-12
setylim = True
if setylim: ax31.set_ylim(bottom=y31a,top=y31b)
#
'''
plt.xlabel(u'$\lambda(\AA)$',fontsize=16)
plt.savefig(indir+'/'+obsid+'_flux.png',dpi=150)
# to plot the three figures
#plt.show()
# output parameter
Y1 = ( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
(C_1,C_2,img), hdr,m1,m2,aa,wav1 )
# output parameter
Y2 = fit, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec
Yout.update({"Yfit":Yfit})
# writing output to a file
#try:
if wr_outfile: # write output file
if ((chatter > 0) & (not clobber)): print("trying to write output files")
import uvotio
if (curved == 'straight') & (not optimal_extraction):
ank_c2 = np.copy(ank_c) ; ank_c2[1] -= m1
F = uvotio.wr_spec(RA,DEC,filestub,ext,
hdr,anker,anker_field[0],anker_field[1],
dis[aa],wav1,
spnet[aa]/expo,bg[aa]/expo,
bg1[aa]/expo,bg2[aa]/expo,
offset,ank_c2,extimg, C_1,
history=None,chatter=1,
clobber=clobber,
calibration_mode=calmode,
interactive=interactive)
elif not optimal_extraction:
if fileversion == 2:
Y = Yout
elif fileversion == 1:
Y = (Y0,Y1,Y2,Y4)
F = uvotio.writeSpectrum(RA,DEC,filestub,ext, Y,
fileoutstub=outfile,
arf1=fluxcalfile, arf2=None,
fit_second=fit_second,
write_rmffile=write_RMF, fileversion=1,
used_lenticular=use_lenticular_image,
history=msg,
calibration_mode=calmode,
chatter=chatter,
clobber=clobber )
elif optimal_extraction:
Y = (Y0,Y1,Y2,Y3,Y4)
F = uvotio.OldwriteSpectrum(RA,DEC,filestub,ext, Y, mode=2,
quality=quality, interactive=False,fileout=outfile,
updateRMF=write_rmffile, \
history=msg, chatter=5, clobber=clobber)
#except (RuntimeError, IOError, ValueError):
# print "ERROR writing output files. Try to call uvotio.wr_spec."
# pass
# clean up fake file
if tempntags.__contains__('fakefilestub'):
filestub = tempnames[tempntags.index('fakefilestub')]
os.system('rm '+indir+filestub+'ufk_??.img ')
# update Figure 3 to use the flux...
# TBD
# write the summary
sys.stdout.write(msg)
sys.stdout.write(msg2)
flog = open(logfile,'a')
flog.write(msg)
flog.write(msg2)
flog.close()
#plt.show()
if give_result: return Y0, Y1, Y2, Y3, Y4
if give_new_result: return Yout
def extractSpecImg(file,ext,anker,angle,anker0=None,anker2=None, anker3=None,\
searchwidth=35,spwid=13,offsetlimit=None, fixoffset=None,
background_lower=[None,None], background_upper=[None,None],
template=None, x_offset = False, ank_c_0offset=False, replace=None,
clobber=True,chatter=2,singleside_bkg=False):
'''
extract the grism image of spectral orders plus background
using the reference point at 2600A in first order.
Parameters
----------
file : str
input file location
ext : int
extension of image
anker : list, ndarray
X,Y coordinates of the 2600A (1) point on the image in image coordinates
angle : float
angle of the spectrum at 2600A in first order from zemax e.g., 28.8
searchwidth : float
find spectrum with this possible offset ( in crowded fields
it should be set to a smaller value)
template : dictionary
template for the background.
use_rectext : bool
If True then the HEADAS uvotimgrism program rectext is used to extract the image
This is a better way than using ndimage.rotate() which does some weird smoothing.
offsetlimit : None, float/int, list
if None, search for y-offset predicted anchor to spectrum using searchwidth
if float/int number, search for offset only up to a distance as given from y=100
if list, two elements, no more. [y-value, delta-y] for search of offset.
if delta-y < 1, fixoffset = y-value.
History
-------
2011-09-05 NPMK changed interpolation in rotate to linear, added a mask image to
make sure to keep track of the new pixel area.
2011-09-08 NPMK incorporated rectext as new extraction and removed interactive plot,
curved, and optimize which are now elsewhere.
2014-02-28 Add template for the background as an option
2014-08-04 add option to provide a 2-element list for the offsetlimit to constrain
the offset search range.
'''
import numpy as np
import os, sys
try:
from astropy.io import fits as pyfits
except:
import pyfits
import scipy.ndimage as ndimage
#out_of_img_val = -1.0123456789 now a global
Tmpl = (template != None)
if Tmpl:
if template['sumimg']:
raise IOError("extractSpecImg should not be called when there is sumimage input")
if chatter > 4:
print('extractSpecImg parameters: file, ext, anker, angle')
print(file,ext)
print(anker,angle)
print('searchwidth,chatter,spwid,offsetlimit, :')
print(searchwidth,chatter,spwid,offsetlimit)
img, hdr = pyfits.getdata(file,ext,header=True)
if isinstance(replace,np.ndarray):
img = replace
# wcs_ = wcs.WCS(header=hdr,) # detector coordinates DETX,DETY in mm
# wcsS = wcs.WCS(header=hdr,key='S',relax=True,) # TAN-SIP coordinate type
if Tmpl:
if (img.shape != template['template'].shape) :
print("ERROR")
print("img.shape=", img.shape)
print("background_template.shape=",template['template'].shape)
raise IOError("The templare array does not match the image")
wheelpos = hdr['WHEELPOS']
if chatter > 4: print('wheelpos:', wheelpos)
if not use_rectext:
# now we want to extend the image array and place the anchor at the centre
s1 = 0.5*img.shape[0]
s2 = 0.5*img.shape[1]
d1 = -(s1 - anker[1]) # distance of anker to centre img
d2 = -(s2 - anker[0])
n1 = 2.*abs(d1) + img.shape[0] + 400 # extend img with 2.x the distance of anchor
n2 = 2.*abs(d2) + img.shape[1] + 400
#return img, hdr, s1, s2, d1, d2, n1, n2
if 2*int(n1/2) == int(n1): n1 = n1 + 1
if 2*int(n2/2) == int(n2): n2 = n2 + 1
c1 = n1 / 2 - anker[1]
c2 = n2 / 2 - anker[0]
n1 = int(n1)
n2 = int(n2)
c1 = int(c1)
c2 = int(c2)
if chatter > 3: print('array info : ',img.shape,d1,d2,n1,n2,c1,c2)
# the anchor is now centered in array a; initialize a with out_of_img_val
a = np.zeros( (n1,n2), dtype=float) + cval
if Tmpl : a_ = np.zeros( (n1,n2), dtype=float) + cval
# load array in middle
a[c1:c1+img.shape[0],c2:c2+img.shape[1]] = img
if Tmpl: a_[c1:c1+img.shape[0],c2:c2+img.shape[1]] = template['template']
# patch outer regions with something like mean to get rid of artifacts
mask = abs(a - cval) < 1.e-8
# Kludge:
# test image for bad data and make a fix by putting the image average in its place
dropouts = False
aanan = np.isnan(a) # process further for flagging
aagood = np.isfinite(a)
aaave = a[np.where(aagood)].mean()
a[np.where(aanan)] = aaave
if len( np.where(aanan)[0]) > 0 :
dropouts = True
print("extractSpecImg WARNING: BAD IMAGE DATA fixed by setting to mean of good data whole image ")
# now we want to rotate the array to have the dispersion in the x-direction
if angle < 40. :
theta = 180.0 - angle
else: theta = angle
if not use_rectext:
b = ndimage.rotate(a,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if Tmpl:
b_ = ndimage.rotate(a_,theta,reshape = False,order = 1,mode = 'constant',cval = cval)
if dropouts: #try to rotate the boolean image
aanan = ndimage.rotate(aanan,theta,reshape = False,order = 1,mode = 'constant',)
e2 = int(0.5*b.shape[0])
c = b[e2-int(slit_width/2):e2+int(slit_width/2),:]
if Tmpl: c_ = b_[e2-int(slit_width/2):e2+int(slit_width/2),:]
if dropouts: aanan = aanan[e2-int(slit_width/2):e2+int(slit_width/2),:]
ank_c = [ (c.shape[0]-1)/2+1, (c.shape[1]-1)/2+1 , 0, c.shape[1]] #~TODO:
if x_offset == False:
pass
else:
ank_c[1] += x_offset
if use_rectext:
# history: rectext is a fortran code that maintains proper density of quantity when
# performing a rotation.
# build the command for extracting the image with rectext
outfile= tempnames[tempntags.index('rectext')]
cosangle = np.cos(theta/180.*np.pi)
sinangle = np.sin(theta/180.*np.pi)
# distance anchor to pivot
dx_ank = - (hdr['naxis1']-anker[0])/cosangle + slit_width/2*sinangle #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
if np.abs(dx_ank) > 760: dx_ank = 760 # include zeroth order (375 for just first order)
# distance to end spectrum
dx_2 = -anker[0] /cosangle + slit_width/2/sinangle # to lhs edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dy_2 = (hdr['naxis2']-anker[1])/sinangle - slit_width/2/cosangle # to top edge #~FIXME: I am not sure if this is "+ 100.*sinangle" or "+ slit_width/2*sinangle"
dx = int(dx_ank + np.array([dx_2,dy_2]).min() ) # length rotated spectrum
dy = slit_width # width rotated spectrum
# pivot x0,y0
x0 = anker[0] - dx_ank*cosangle + dy/2.*sinangle
y0 = anker[1] - dx_ank*sinangle - dy/2.*cosangle
command= "rectext infile="+file+"+"+str(ext)
command+=" outfile="+outfile
command+=" angle="+str(theta)+" width="+str(dx)
command+=" height="+str(dy)+" x0="+str(x0)+" y0="+str(y0)
command+=" null="+str(cval)
command+=" chatter=5 clobber=yes"
print(command)
os.system(command)
c = extimg = pyfits.getdata(outfile,0)
ank_c = np.array([int(slit_width/2),dx_ank,0,extimg.shape[1]])
# out_of_img_val = 0.
if clobber:
os.system("rm "+outfile)
if Tmpl:
raise("background_template cannot be used with use_rectext option")
# version 2016-01-16 revision:
# the background can be extracted via a method from the strip image
#
# extract the strips with the background on both sides, and the spectral orders
# find optimised place of the spectrum
# first find parts not off the detector -> 'qofd'
eps1 = 1e-15 # remainder after resampling for intel-MAC OSX system (could be jacked up)
qofd = np.where( abs(c[int(slit_width/2),:] - cval) > eps1 )
# define constants for the spectrum in each mode
if wheelpos < 300: # UV grism
disrange = 150 # perhaps make parameter in call?
disscale = 10 # ditto
minrange = disrange/10 # 300 is maximum
maxrange = np.array([disrange*disscale,c.shape[1]-ank_c[1]-2]).min() # 1200 is most of the spectrum
else: # V grism
disrange = 120 # perhaps make parameter in call?
disscale = 5 # ditto
minrange = np.array([disrange/2,ank_c[1]-qofd[0].min() ]).max() # 300 is maximum
maxrange = np.array([disrange*disscale,c.shape[1]-ank_c[1]-2],qofd[0].max()-ank_c[1]).min() # 600 is most of the spectrum
if chatter > 1:
#print 'image was rotated; anchor in extracted image is ', ank_c[:2]
#print 'limits spectrum are ',ank_c[2:]
print('finding location spectrum from a slice around anchor x-sized:',minrange,':',maxrange)
print('offsetlimit = ', offsetlimit)
d = (c[:,int(ank_c[1]-minrange):int(ank_c[1]+maxrange)]).sum(axis=1).squeeze()
if len(qofd[0]) > 0:
ank_c[2] = min(qofd[0])
ank_c[3] = max(qofd[0])
else:
ank_c[2] = -1
ank_c[3] = -1
# y-position of anchor spectrum in strip image (allowed y (= [50,150], but search only in
# range defined by searchwidth (default=35) )
y_default=int(slit_width/2) # reference y
if (type(offsetlimit) == list):
if (len(offsetlimit)==2):
# sane y_default
if (offsetlimit[0] > 50) & (offsetlimit[0] < 150):
y_default=int(offsetlimit[0]+0.5) # round to nearest pixel
else:
raise IOError("parameter offsetlimit[0]=%i, must be in range [51,149]."+
"\nIs the aspect correction right (in reference images)?"%(offsetlimit[0]))
if offsetlimit[1] < 1:
fixoffset = offsetlimit[0]-int(slit_width/2)
else:
searchwidth=int(offsetlimit[1]+0.5)
if fixoffset == None:
offset = ( (np.where(d == (d[y_default-searchwidth:y_default+searchwidth]).max() ) )[0] - y_default )
if chatter>0: print('offset found from y=%i is %i '%(y_default ,-offset))
if len(offset) == 0:
print('offset problem: offset set to zero')
offset = 0
offset = offset[0]
if (type(offsetlimit) != list):
if (offsetlimit != None):
if abs(offset) >= offsetlimit:
offset = 0
print('This is larger than the offsetlimit. The offset has been set to 0')
if interactive:
offset = float(input('Please give a value for the offset: '))
else:
offset = fixoffset
if ank_c_0offset == True:
offset = 0
if chatter > 0:
print('offset used is : ', -offset)
if (type(offsetlimit) == list) & (fixoffset == None):
ank_c[0] = offsetlimit[0]-offset
else:
ank_c[0] += offset
print('image was rotated; anchor in extracted image is [', ank_c[0],',',ank_c[1],']')
print('limits spectrum on image in dispersion direction are ',ank_c[2],' - ',ank_c[3])
# Straight slit extraction (most basic extraction, no curvature):
sphalfwid = int(spwid-0.5)/2
splim1 = int(slit_width/2)+offset-sphalfwid+1
splim2 = splim1 + spwid
spimg = c[int(splim1):int(splim2),:]
if chatter > 0:
print('Extraction limits across dispersion: splim1,splim2 = ',splim1,' - ',splim2)
bg, bg1, bg2, bgsigma, bgimg, bg_limits, bgextras = findBackground(c,
background_lower=background_lower, background_upper=background_upper,yloc_spectrum=ank_c[0] )
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
bgmean = bg
bg = 0.5*(bg1+bg2)
if chatter > 0: print('Background : %10.2f +/- %10.2f (1-sigma error)'%( bgmean,bgsigma))
# define the dispersion with origen at the projected position of the
# 2600 point in first order
dis = np.arange((c.shape[1]),dtype=np.int16) - ank_c[1]
# remove the background
#bgimg_ = 0.* spimg.copy()
#for i in range(bgimg_.shape[0]): bgimg_[i,:]=bg
spnetimg = spimg - bg
spnet = spnetimg.sum(axis=0)
result = {"dis":dis,"spnet":spnet,"bg":bg,"bg1":bg1,
"bg2":bg2,"bgsigma":bgsigma,"bgimg":bgimg,
"bg_limits_used":bg_limits,"bgextras":bgextras,
"extimg":c,"spimg":spimg,"spnetimg":spnetimg,
"offset":offset,"ank_c":ank_c,'dropouts':dropouts}
if dropouts: result.update({"dropout_mask":aanan})
if Tmpl: result.update({"template_extimg":c_})
return result
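# Usage sketch (the file name, anchor, and angle below are placeholders for
# illustration only; in the pipeline this routine is called with the values
# derived from the wavelength calibration):
#   out = extractSpecImg('sw_example_ugu_dt.img', 1, [1100., 1100.], 28.8,
#                        offsetlimit=[100, 3], chatter=1)
#   dis, spnet, ank_c = out["dis"], out["spnet"], out["ank_c"]
# The returned dictionary also carries the rotated strip ("extimg") and the two
# background estimates ("bg1", "bg2") used later by the curved extraction.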
def sigclip1d_mask(array1d, sigma, badval=None, conv=1e-5, maxloop=30):
"""
sigma clip the array around its mean, using 'sigma' standard deviations,
after masking the given badval and requiring finite numbers; iterate
until converged or until maxloop is reached.
Returns the mask of good elements.
"""
import numpy as np
y = np.asarray(array1d)
if badval != None:
valid = (np.abs(y - badval) > 1e-6) & np.isfinite(y)
else:
valid = np.isfinite(y)
yv = y[valid]
mask = yv < (yv.mean() + sigma * yv.std())
ym_ = yv.mean()
ymean = yv[mask].mean()
yv = yv[mask]
while (np.abs(ym_-ymean) > conv*np.abs(ymean)) & (maxloop > 0):
ym_ = ymean
mask = ( yv < (yv.mean() + sigma * yv.std()) )
yv = yv[mask]
ymean = yv.mean()
maxloop -= 1
valid[valid] = y[valid] < ymean + sigma*yv.std()
return valid
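# Minimal illustration of sigclip1d_mask (synthetic numbers, not pipeline data):
#   y = np.array([1.0, 1.2, 0.9, 1.1, 50.0, -1.0123456789])
#   good = sigclip1d_mask(y, 2., badval=-1.0123456789)
#   # -> [True, True, True, True, False, False]
# the 50.0 outlier lies above mean + 2*std of the clipped sample, and the badval
# element is masked from the start, so both come back False.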
def background_profile(img, smo1=30, badval=None):
"""
helper routine to determine the background of the rotated image
(spectrum in rows) using sigma clipping.
"""
import numpy as np
from scipy import interpolate
bgimg = img.copy()
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# look at the summed rows of the image
u_ysum = []
for i in range(ny):
u_ysum.append(bgimg[i,:].mean())
u_ysum = np.asarray(u_ysum)
u_ymask = sigclip1d_mask(u_ysum, 2.5, badval=badval, conv=1e-5, maxloop=30)
u_ymean = u_ysum[u_ymask].mean()
# look at the summed columns after filtering bad rows
u_yindex = np.where(u_ymask)[0]
u_xsum = []
u_std = []
for i in range(nx):
u_x1 = bgimg[u_yindex, i].squeeze()
# clip u_x1
u_x1mask = sigclip1d_mask(u_x1, 2.5, badval=None, conv=1e-5, maxloop=30)
u_xsum.append(u_x1[u_x1mask].mean())
u_std.append(u_x1[u_x1mask].std())
#print u_x1[u_x1mask]
#if np.isfinite(u_x1mask.mean()) & len(u_x1[u_x1mask])>0:
# print "%8.2f %8.2f %8.2f "%(u_x1[u_x1mask].mean(),u_x1[u_x1mask].std(),u_x1[u_x1mask].max())
# the best background estimate of the typical row is now u_xsum
# fit a smooth spline through the u_xsum values (or boxcar?)
#print "u_x means "
#print u_xsum
u_xsum = np.asarray(u_xsum)
u_std = np.asarray(u_std)
u_xsum_ok = np.isfinite(u_xsum)
bg_tcp = interpolate.splrep(np.arange(nx)[u_xsum_ok],
np.asarray(u_xsum)[u_xsum_ok], s=smo1)
# representative background profile in column
u_x = interpolate.splev(np.arange(nx), bg_tcp, )
return u_xsum, u_x, u_std
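# Illustration with a synthetic strip (made-up values, for illustration only):
# a flat Poisson background of ~5 counts with a bright band of rows standing in
# for the spectrum; the clipped column profile u_x should stay close to 5.
#   img = np.random.poisson(5., size=(200, 1000)).astype(float)
#   img[95:105, :] += 200.          # fake spectrum rows
#   u_xsum, u_x, u_std = background_profile(img, smo1=30, badval=None)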
def findBackground(extimg,background_lower=[None,None], background_upper=[None,None],yloc_spectrum=int(slit_width/2),
smo1=None, smo2=None, chatter=2):
'''Extract the background from the image slice containing the spectrum.
Parameters
----------
extimg : 2D array
image containing spectrum. Dispersion approximately along x-axis.
background_lower : list
distance in pixels from `yloc_spectrum` of the limits of the lower background region.
background_upper : list
distance in pixels from `yloc_spectrum` of the limits of the upper background region.
yloc_spectrum : int
pixel `Y` location of spectrum
smo1 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
smo2 : float
smoothing parameter passed to smoothing spline fitting routine. `None` for default.
chatter : int
verbosity
Returns
-------
bg : float
mean background
bg1, bg2 : 1D arrays
bg1 = lower background; bg2 = upper background
inherits size from extimg.shape x-coordinate
bgsig : float
standard deviation of background
bgimg : 2D array
image of the background constructed from bg1 and/or bg2
bg_limits_used : list, length 4
limits used for the background in the following order: lower background, upper background
(bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) : tuple
various other background measures
Notes
-----
**Global parameter**
- **background_method** : {'boxcar','splinefit'}
The two background images can be computed in three ways:
1. 'splinefit': sigma clip image, then fit a smoothing spline to each
row, then average in y for each background region
2. 'boxcar': select the background from the smoothed image created
by method 1 below.
3. 'sigmaclip': do sigma clipping on rows and columns to get column
profile background, then clip image and mask, interpolate over masked
bits.
extimg is the image containing the spectrum in the 1-axis centered in 0-axis
`ank` is the position of the anchor in the image
I create two background images:
1. split the image strip into 40 portions in x, so that the background variation is small
compute the mean
sigma clip (3 sigma) each area to the local mean
replace out-of-image pixels with mean of whole image (2-sigma clipped)
smooth with a boxcar by the smoothing factor
2. compute the background in two regions upper and lower
linearly interpolate in Y between the two regions to create a background image
bg1 = lower background; bg2 = upper background
smo1, smo2 allow one to relax the smoothing factor in computing the smoothing spline fit
History
-------
- 8 Nov 2011 NPM Kuin complete overhaul
things to do: get quality flagging of bad background points, edges perhaps done here?
- 13 Aug 2012: possible problem was seen of very bright sources not getting masked out properly
and causing an error in the background that extends over a large distance due to the smoothing.
The cause is that the sources are more extended than can be handled by this method.
A solution would be to derive a global background
- 30 Sep 2014: background fails in visible grism e.g., 57977004+1 nearby bright spectrum
new method added (4x slower processing) to screen the image using sigma clipping
'''
import sys
import numpy as np
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
from scipy import interpolate
import stsci.imagestats as imagestats
# initialize parameters
bgimg = extimg.copy()
out = np.where( (np.abs(bgimg-cval) <= 1e-6) )
in_img = np.where( (np.abs(bgimg-cval) > 1e-6) & np.isfinite(bgimg) )
nx = bgimg.shape[1] # number of points in direction of dispersion
ny = bgimg.shape[0] # width of the image
# sigma screening of background taking advantage of the dispersion being
# basically along the x-axis
if _PROFILE_BACKGROUND_:
bg, u_x, bg_sig = background_profile(bgimg, smo1=30, badval=cval)
u_mask = np.zeros((ny,nx),dtype=bool)
for i in range(ny):
u_mask[i,(bgimg[i,:].flatten() < u_x) &
np.isfinite(bgimg[i,:].flatten())] = True
bkg_sc = np.zeros((ny,nx),dtype=float)
# the following leaves larger disps in the dispersion but less noise;
# tested but not implemented, as it is not as fast and the mean results
# are comparable:
#for i in range(ny):
# uf = interpolate.interp1d(np.where(u_mask[i,:])[0],bgimg[i,u_mask[i,:]],bounds_error=False,fill_value=cval)
# bkg_sc[i,:] = uf(np.arange(nx))
#for i in range(nx):
# ucol = bkg_sc[:,i]
# if len(ucol[ucol != cval]) > 0:
# ucol[ucol == cval] = ucol[ucol != cval].mean()
for i in range(nx):
ucol = bgimg[:,i]
if len(ucol[u_mask[:,i]]) > 0:
ucol[np.where(u_mask[:,i] == False)[0] ] = ucol[u_mask[:,i]].mean()
bkg_sc[:,i] = ucol
if background_method == 'sigmaclip':
return bkg_sc
else:
# continue now with the with screened image
bgimg = bkg_sc
kx0 = 0 ; kx1 = nx # default limits for valid lower background
kx2 = 0 ; kx3 = nx # default limits for valid upper background
ny4 = int(0.25*ny) # default width of each default background region
sig1 = 1 # unit for background offset, width
bg_limits_used = [0,0,0,0] # return values used
## in the next section I replace the > 2.5 sigma peaks with the mean
## after subdividing the image strip to allow for the
## change in background level which can be > 2 over the
## image. Off-image parts are set to image mean.
# this works most times in the absence of the sigma screening,but
# can lead to overestimates of the background.
# the call to the imagestats package is only done here, and should
# consider replacement. Its not critical for the program.
#
xlist = np.linspace(0,bgimg.shape[1],80)
xlist = np.asarray(xlist,dtype=int)
imgstats = imagestats.ImageStats(bgimg[in_img[0],in_img[1]],nclip=3)
bg = imgstats.mean
bgsig = imgstats.stddev
if chatter > 2:
sys.stderr.write( 'background statistics: mean=%10.2f, sigma=%10.2f '%
(imgstats.mean, imgstats.stddev))
# create boolean image flagging good pixels
img_good = np.ones(extimg.shape,dtype=bool)
# flag area out of picture as bad
img_good[out] = False
# replace high values in image with estimate of mean and flag them as not good
for i in range(78):
# after the sigma screening this is a bit of overkill, leave in for now
sub_bg = boxcar(bgimg[:,xlist[i]:xlist[i+2]] , (5,5), mode='reflect', cval=cval)
sub_bg_use = np.where( np.abs(sub_bg - cval) > 1.0e-5 ) # list of coordinates
imgstats = None
if sub_bg_use[0].size > 0:
imgstats = imagestats.ImageStats(sub_bg[sub_bg_use],nclip=3)
# patch values in image (not out of image) with mean if outliers
aval = 2.0*imgstats.stddev
img_clip_ = (
(np.abs(bgimg[:,xlist[i]:xlist[i+2]]-cval) < 1e-6) |
(np.abs(sub_bg - imgstats.mean) > aval) |
(sub_bg <= 0.) | np.isnan(sub_bg) )
bgimg[:,xlist[i]:xlist[i+2]][img_clip_] = imgstats.mean # patch image
img_good[:,xlist[i]:xlist[i+2]][img_clip_] = False # flag patches
# the next section selects the user-selected or default background for further processing
if chatter > 1:
if background_method == 'boxcar':
sys.stderr.write( "BACKGROUND METHOD: %s; background smoothing = %s\n"%
(background_method,background_smoothing))
else:
sys.stderr.write( "BACKGROUND METHOD:%s\n"%(background_method ))
if not ((background_method == 'splinefit') | (background_method == 'boxcar') ):
sys.stderr.write('background method missing; currently reads : %s\n'%(background_method))
if background_method == 'boxcar':
# boxcar smooth in x,y using the global parameter background_smoothing
bgimg = boxcar(bgimg,background_smoothing,mode='reflect',cval=cval)
if background_lower[0] == None:
bg1 = bgimg[0:ny4,:].copy()
bg_limits_used[0]=0
bg_limits_used[1]=ny4
bg1_good = img_good[0:ny4,:]
kx0 = np.min(np.where(img_good[0,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[0,:]))-10
else:
# no curvature, no second order: limits
bg1_1= np.max(np.array([yloc_spectrum - sig1*background_lower[0],20 ]))
#bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[0]+background_lower[1]),0]))
bg1_0= np.max(np.array([yloc_spectrum - sig1*(background_lower[1]),0]))
bg1 = bgimg[int(bg1_0):int(bg1_1),:].copy()
bg_limits_used[0]=bg1_0
bg_limits_used[1]=bg1_1
bg1_good = img_good[int(bg1_0):int(bg1_1),:]
kx0 = np.min(np.where(img_good[int(bg1_0),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx1 = np.max(np.where(img_good[int(bg1_0),:]))-10 # corrected for edge effects
#if ((kx2-kx0) < 20):
# print 'not enough valid upper background points'
if background_upper[0] == None:
bg2 = bgimg[-ny4:ny,:].copy()
bg_limits_used[2]=ny-ny4
bg_limits_used[3]=ny
bg2_good = img_good[-ny4:ny,:]
kx2 = np.min(np.where(img_good[ny-1,:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[ny-1,:]))-10
else:
bg2_0= np.min(np.array([yloc_spectrum + sig1*background_upper[0],(slit_width-20) ]))
#bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[0]+background_upper[1]),ny]))
bg2_1= np.min(np.array([yloc_spectrum + sig1*(background_upper[1]),ny]))
bg2 = bgimg[int(bg2_0):int(bg2_1),:].copy()
bg_limits_used[2]=bg2_0
bg_limits_used[3]=bg2_1
bg2_good = img_good[int(bg2_0):int(bg2_1),:]
kx2 = np.min(np.where(img_good[int(bg2_1),:]))+10 # assuming the spectrum is in the top two thirds of the detector
kx3 = np.max(np.where(img_good[int(bg2_1),:]))-10
#if ((kx3-kx2) < 20):
# print 'not enough valid upper background points'
if background_method == 'boxcar':
bg1 = bg1_dis = bg1.mean(0)
bg2 = bg2_dis = bg2.mean(0)
bg1_dis_good = np.zeros(nx,dtype=bool)
bg2_dis_good = np.zeros(nx,dtype=bool)
for i in range(nx):
bg1_dis_good[i] = bool(int(bg1_good[:,i].mean(0)))
bg2_dis_good[i] = bool(int(bg2_good[:,i].mean(0)))
if background_method == 'splinefit':
# mean bg1_dis, bg2_dis across dispersion
bg1_dis = np.zeros(nx) ; bg2_dis = np.zeros(nx)
for i in range(nx):
bg1_dis[i] = bg1[:,i][bg1_good[:,i]].mean()
if not bool(int(bg1_good[:,i].mean())):
bg1_dis[i] = cval
bg2_dis[i] = bg2[:,i][bg2_good[:,i]].mean()
if not bool(int(bg2_good[:,i].mean())):
bg2_dis[i] = cval
# some parts of the background may have been masked out completely, so
# find the good points and the bad points
bg1_dis_good = np.where( np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7) )
bg2_dis_good = np.where( np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7) )
bg1_dis_bad = np.where( ~(np.isfinite(bg1_dis) & (np.abs(bg1_dis - cval) > 1.e-7)) )
bg2_dis_bad = np.where( ~(np.isfinite(bg2_dis) & (np.abs(bg2_dis - cval) > 1.e-7)) )
# fit a smoothing spline to each background
x = bg1_dis_good[0]
s = len(x) - np.sqrt(2.*len(x))
if smo1 != None: s = smo1
if len(x) > 40: x = x[7:len(x)-7] # clip end of spectrum where there is downturn
w = np.ones(len(x))
tck1 = interpolate.splrep(x,bg1_dis[x],w=w,xb=bg1_dis_good[0][0],xe=bg1_dis_good[0][-1],k=3,s=s)
bg1 = np.ones(nx) * (bg1_dis[x]).mean()
bg1[np.arange(kx0,kx1)] = interpolate.splev(np.arange(kx0,kx1), tck1)
x = bg2_dis_good[0]
s = len(x) - np.sqrt(2.*len(x))
if smo2 != None: s = smo2
if len(x) > 40: x = x[10:len(x)-10] # clip
w = np.ones(len(x))
tck2 = interpolate.splrep(x,bg2_dis[x],w=w,xb=bg2_dis_good[0][0],xe=bg2_dis_good[0][-1],k=3,s=s)
bg2 = np.ones(nx) * (bg2_dis[x]).mean()
bg2[np.arange(kx2,kx3)] = interpolate.splev(np.arange(kx2,kx3), tck2)
# force bg >= 0:
# spline can do weird things ?
negvals = bg1 < 0.0
if negvals.any():
bg1[negvals] = 0.0
if chatter > 1:
print("background 1 set to zero in ",len(np.where(negvals)[0])," points")
negvals = bg2 < 0.0
if negvals.any():
bg2[negvals] = 0.0
if chatter > 1:
print("background 1 set to zero in ",len(np.where(negvals)[0])," points")
# image constructed from linear inter/extra-polation of bg1 and bg2
bgimg_lin = np.zeros(ny*nx).reshape(ny,nx)
dbgdy = (bg2-bg1)/(ny-1)
for i in range(ny):
bgimg_lin[i,:] = bg1 + dbgdy*i
# interpolate background and generate smooth interpolation image
if ( (background_lower[0] == None) & (background_upper[0] == None)):
# default background region
dbgdy = (bg2-bg1)/150.0 # assuming height spectrum 200 and width extraction regions 30 pix each
for i9 in range(bgimg.shape[0]):
bgimg[i9,kx0:kx1] = bg1[kx0:kx1] + dbgdy[kx0:kx1]*(i9-25)
bgimg[i9,0:kx0] = bg2[0:kx0]
bgimg[i9,kx1:nx] = bg2[kx1:nx]
if chatter > 2: print("1..BACKGROUND DEFAULT from BG1 and BG2")
elif ((background_lower[0] != None) & (background_upper[0] == None)):
# set background to lower background region
for i9 in range(bgimg.shape[0]):
bgimg[i9,:] = bg1
if chatter > 2: print("2..BACKGROUND from lower BG1 only")
elif ((background_upper[0] != None) & (background_lower[0] == None)):
# set background to that of upper background region
for i9 in range(bgimg.shape[0]):
bgimg[i9,:] = bg2
if chatter > 2: print("3..BACKGROUND from upper BG2 only")
else:
# linear interpolation of the two background regions
dbgdy = (bg2-bg1)/(background_upper[0]+0.5*background_upper[1]+background_lower[0]+0.5*background_lower[1])
for i9 in range(bgimg.shape[0]):
bgimg[i9,kx0:kx1] = bg1[kx0:kx1] + dbgdy[kx0:kx1]*(i9-int(int(slit_width/2)-(background_lower[0]+0.5*background_lower[1])))
bgimg[i9,0:kx0] = bg2[0:kx0] # assuming that the spectrum in not in the lower left corner
bgimg[i9,kx1:nx] = bg2[kx1:nx]
if chatter > 2: print("4..BACKGROUND from BG1 and BG2")
return bg, bg1, bg2, bgsig, bgimg, bg_limits_used, (bg1_good, bg1_dis,
bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin)
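# Typical call, matching how the extraction code above uses it (the default
# background regions are shown; extimg is the rotated strip from extractSpecImg):
#   bg, bg1, bg2, bgsig, bgimg, bg_limits, bgextras = findBackground(extimg,
#         background_lower=[None,None], background_upper=[None,None],
#         yloc_spectrum=int(slit_width/2))
#   # bg1/bg2 are 1-D backgrounds per dispersion column below/above the spectrum;
#   # bgimg is the 2-D background interpolated between them.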
def interpol(xx,x,y):
'''
linearly interpolate a function y(x) to return y(xx)
no special treatment of boundaries
2011-12-10 NPMKuin skip all data points which are not finite
'''
import numpy as np
x = np.asarray(x.ravel())
y = np.asarray(y.ravel())
q0 = np.isfinite(x) & np.isfinite(y) # filter out NaN values
q1 = np.where(q0)
if len(q1[0]) == 0:
print("error in arrays to be interpolated")
print("x:",x)
print("y:",y)
print("arg:",xx)
x1 = x[q1[0]]
y1 = y[q1[0]]
q2 = np.where( np.isfinite(xx) ) # filter out NaN values
kk = x1.searchsorted(xx[q2])-1
# should extrapolate if element of k = len(a)
#q = np.where(k == len(a)) ; k[q] = k[q]-1
n = len(kk)
f = np.zeros(n)
f2 = np.zeros(len(xx))
for i in range(n):
k = kk[i]
if k > (len(x1)-2):
k = len(x1) - 2
s = (y1[k+1]-y1[k])/(x1[k+1]-x1[k])
f[i] = y1[k]+s*(xx[q2[0]][i]-x1[k])
f2[q2] = f
# positions in xx that are not finite are returned as NaN
f2[np.where(~np.isfinite(xx))] = np.nan
return f2
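# Quick check of interpol (hypothetical values):
#   interpol(np.array([1.5]), np.array([1., 2.]), np.array([10., 20.]))
#   # -> array([15.]); non-finite entries in xx come back as NaN.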
def hydrogen(n,l):
'''
Return roughly the wavelength of the Hydrogen lines
Lyman spectrum: l=0, n>l+1
Balmer spectrum: l=1, n>2
Paschen spectrum: l=2, n>3
'''
# Rydberg constant in m-1 units
R = 1.097e7
inv_lam = R*(1./(l+1)**2 - 1./n**2)
lam = 1./inv_lam * 1e10
return lam
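# Sanity check of hydrogen() against the Rydberg formula used above
# (illustrative calls, not part of the pipeline):
#   hydrogen(3,1)   # ~6563 A, Balmer H-alpha: 1/lam = R*(1/2**2 - 1/3**2)
#   hydrogen(2,0)   # ~1215 A, Lyman-alpha:    1/lam = R*(1 - 1/2**2)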
def boresight(filter='uvw1',order=1,wave=260,
r2d=77.0,date=0,chatter=0):
''' provide reference positions on the
UVOT filters for mapping and as function of
time for grisms.
This function name is for historical reasons,
and provides a key mapping function for the
spectral extraction.
The correct boresight of the (lenticular) filters
should be gotten from the Swift UVOT CALDB
as maintained by HEASARC. The positions here
are in some cases substantially different from
the boresight in the CALDB. They are reference
positions for the spectral extraction algorithms
rather than boresight.
The grism boresight positions at 260nm (uv grism)
and 420nm (visible grism) in first order are served
in an uncommon format (in DET pixels)
by adding (77,77) to the lenticular filter
RAW coordinate.(see TELDEF file) the grism
boresight was measured in DET coordinates,
not RAW. (offset correction should be 104,78)
Parameters
----------
filter : str
one of {'ug200','uc160','vg1000','vc955',
'wh','v','b','u','uvw1','uvm2','uvw2'}
order : {0,1,2}
order for which the anchor is needed
wave : float
anchor wavelength in nm
r2d : float
additive factor in x,y to anchor position
date: long
format in swift time (s)
if 0 then provide the first order anchor
coordinates of the boresight for mapping
from the lenticular filter position
chatter : int
verbosity
Returns
-------
When *date* = 0:
For translation: The boresight for a filter
(in DET pixels) by adding (77,77) to the
lenticular filter RAW coordinate (see TELDEF file)
the grism boresight was measured in DET
(The default r2d=77 returns the correct
boresight for the grisms in detector
coordinates. To get the grism boresight in
detector image coordinates, subtract (104,78)
typically. The difference is due to the distortion
correction from RAW to DET)
When *date* is non-zero, and *order*=0:
The zeroth order boresight
NOTE:
-----
THE TRANSLATION OF LENTICULAR IMAGE TO GRISM
IMAGE IS ALWAYS THE SAME, INDEPENDENT OF THE
BORESIGHT.
THEREFORE THE BORESIGHT DRIFT DOES NOT AFFECT
THE GRISM ANCHOR POSITIONS AS LONG AS THE DEFAULT
BORESIGHT POSITIONS ARE USED.
[Because those were used for the calibration].
However, the zeroth order "reference" position
drift affects the "uvotgraspcorr" - derived
WCS-S. The positions used
History:
2014-01-04 NPMK : rewrite to inter/extrapolate
the boresight positions
'''
from scipy.interpolate import interp1d
import numpy as np
filterlist = ['ug200','uc160','vg1000','vc955',
'wh','v','b','u','uvw1','uvm2','uvw2']
if filter == 'list': return filterlist
grismfilters = ['ug200','uc160','vg1000','vc955']
lenticular = ['v','b','u','uvw1','uvm2','uvw2']
#old pixel offset anchor based on pre-2010 data
# dates in swift time, drift [x.y] in pixels
#dates=[209952000,179971200,154483349,139968000,121838400]
#drift=[ [0,0], [+2.4,-2.0], [+3.4,-3.0], [+6.4,-10], [+6.4,-10]]
# data from Frank's plot (email 2 dec 2013, uvw1 filter)
# original plot was in arcsec, but the drift converted
# to pixels. uvw1 seems representative (except for white)
swtime = np.array([
1.25000000e+08, 1.39985684e+08, 1.60529672e+08,
1.89248438e+08, 2.23489068e+08, 2.46907209e+08,
2.66126366e+08, 2.79601770e+08, 2.89763794e+08,
3.01251301e+08, 3.13180634e+08, 3.28423998e+08,
3.43445470e+08, 3.59351249e+08, 3.75257678e+08,
4.50000000e+08])
boredx = (np.array([-1.6, -0.870,0.546,1.174,2.328,2.47,
2.813,3.076,3.400,3.805,4.149,4.656,
5.081,5.607,6.072,8.56 ])-1.9)/0.502
boredy = (np.array([ -0.75,-2.197,-4.857,-6.527,
-7.098,-7.252,-7.142,-7.560,
-7.670,-8.000,-8.043,-8.395,
-8.637,-9.142,-9.670,-11.9])+6.8)/0.502
# I assume the same overall drift for the grism
# boresight (in pixels). Perhaps a scale factor for the
# grism would be closer to 0.56 pix/arcsec
# the range has been extrapolated for better interpolation
# and also to support the near future. The early
# time extrapolation is different from the nearly constant
# boresight in the teldef but within about a pixel.
# I think the extrapolation is more accurate.
fx = interp1d(swtime,boredx,bounds_error=False,fill_value="extrapolate")
fy = interp1d(swtime,boredy,bounds_error=False,fill_value="extrapolate")
# reference anchor positions
reference0 = {'ug200': [1449.22, 707.7],
'uc160': [1494.9 , 605.8], #[1501.4 , 593.7], # ?[1494.9, 605.8],
'vg1000':[1506.8 , 664.3],
'vc955': [1542.5 , 556.4]}
# DO NOT CHANGE THE FOLLOWING VALUES AS THE WAVECAL DEPENDS ON THEM !!!
reference1 = {'ug200': [ 928.53,1002.69],
'uc160': [1025.1 , 945.3 ],
'vg1000':[ 969.3 ,1021.3 ],
'vc955': [1063.7 , 952.6 ]}
if (filter in grismfilters):
if (date > 125000000) and (order == 0):
anchor = reference0[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif (date > 125000000) and (order == 1):
anchor = reference1[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif order == 1:
anchor = reference1[filter]
anchor[0] += r2d
anchor[1] += r2d
return anchor
elif order == 0:
raise RuntimeError(
"The zeroth order reference position needs a date")
else:
return reference1[filter]
elif (date > 125000000) and (filter in lenticular):
ref_lent = {'v':[951.74,1049.89],
'b':[951.87,1049.67],
'u':[956.98,1047.84],
'uvw1':[951.20,1049.36],
'uvm2':[949.75,1049.30],
'uvw2':[951.11,1050.18]}
anchor = ref_lent[filter]
anchor[0] += r2d-fx(date)
anchor[1] += r2d-fy(date)
return anchor
elif (date > 122000000) and (filter == 'wh'):
print("approximate static white filter boresight")
if date > 209952000:
return 949.902+r2d, 1048.837+r2d
elif date > 179971200:
return 953.315+r2d, 1048.014+r2d
elif date > 154483349:
return 954.506+r2d, 1043.486+r2d
elif date > 139968000:
return 956.000+r2d, 1039.775+r2d
elif date > 121838400:
return 956.000+r2d, 1039.775+r2d
else: return filterlist
else:
# this is the version used initially *(changed 2 june 2009)
# DO NOT CHANGE THESE VALUES AS THE WAVECAL DEPENDS ON THEM !!!
if filter == 'uvw1': return 954.61+r2d, 1044.66+r2d
elif filter == 'wh' : return 954.51+r2d, 1043.49+r2d
elif filter == 'v' : return 955.06+r2d, 1045.98+r2d
elif filter == 'b' : return 955.28+r2d, 1045.08+r2d
elif filter == 'u' : return 960.06+r2d, 1043.33+r2d
elif filter == 'uvm2': return 953.23+r2d, 1044.90+r2d
elif filter == 'uvw2': return 953.23+r2d, 1044.90+r2d
elif filter == 'w1' : return 954.61+r2d, 1044.66+r2d
elif filter == 'm2' : return 953.23+r2d, 1044.90+r2d
elif filter == 'w2' : return 953.23+r2d, 1044.90+r2d
elif filter == 'ug200':
if order == 1:
if wave == 260: return 928.53+r2d,1002.69+r2d
elif filter == 'uc160':
if order == 1:
if wave == 260: return 1025.1+27+r2d,945.3+r2d
elif filter == 'vg1000':
#elif order == 1: return 948.4+r2d, 1025.9+r2d
if order == 1: return 969.3+r2d, 1021.3+r2d
elif filter == 'vc955':
if order == 1: return 1063.7+r2d, 952.6+r2d
raise IOError("valid filter values are 'wh','v',"\
"'b','u','uvw1','uvm2','uvw2','ug200',"\
"'uc160','vg1000','vc955'\n")
def makeXspecInput(lamdasp,countrate,error,lamda_response=None,chatter=1):
''' Convert the count rate spectrum per pixel into a spectrum
on the given bins of the response function.
Parameters
----------
lamdasp : array
wavelengths spectrum
countrate : array
count rates at wavelengths
error : array
errors at wavelengths
kwargs : dict
- **lamda_response** : array
the wavelength for the response bins
- **chatter** : int
verbosity
Returns
-------
lambda : array
wavelengths of the bins
countrate : array
count rate in the bins
error : array
errors in the bins
Notes
-----
errors are summed as sqrt( sum (errors**2 ) )
'''
# calculate bin size response, data
if type(lamda_response) == typeNone:
print('need to read in response matrix file')
print(' please code it up')
return None
new_countrate = np.zeros(len(lamda_response))
new_error = np.zeros(len(lamda_response))
# find bin widths
dlamresp = lamda_response.copy()*0
for i in range(len(dlamresp) -1):
dlamresp[i+1] = lamda_response[i+1] - lamda_response[i]
dlamresp[0] = dlamresp[1] # set width first two data bins equal (could inter/extrapolate the lot)
dlam = lamdasp.copy()*0
for i in range(len(dlam) -1):
dlam[i+1]=lamdasp[i+1] - lamdasp[i]
dlam[0] = dlam[1]
#
for i in range(len(lamda_response)):
# find the pixels to use that have contributions to the bin
lam1 = lamda_response[i] - dlamresp[i]/2.0
lam2 = lamda_response[i] + dlamresp[i]/2.0
if ( (lam1 >= (np.max(lamdasp)+dlam[len(lamdasp)-1])) ^ (lam2 <= (np.min(lamdasp)-dlam[0]))):
# no count data
new_countrate[i] = 0
if ((chatter > 2) & (i < 450) & (i > 400)) :
print(' i = ',i,' lam1 = ',lam1,' lam2 = ', lam2,' <<< counts set to zero ')
print(' i = ',i,' term 1 ',(np.max(lamdasp)-dlam[len(lamdasp)-1]))
print(' i = ',i,' term 2 ',(np.min(lamdasp)+dlam[0] ))
else:
if chatter > 2: print('new bin ',i,' lam = ',lam1,' - ',lam2)
# find the bits to add
k = np.where( (lamdasp+dlam/2 > lam1) & (lamdasp-dlam/2 <= lam2) )
# the countrate in a bin is proportional to its width; make sure only
# the part of the data array that fall within the new bin is added
if chatter > 2:
print('data in ',k[0],' wavelengths ',lamdasp[k[0]])
print('counts are ',countrate[k[0]])
nk = len(k[0])
factor = np.zeros( nk )
for m in range(nk): # now loop over all bins that might contribute
wbin1 = lamdasp[k[0][m]] - dlam[k[0][m]]/2
wbin2 = lamdasp[k[0][m]] + dlam[k[0][m]]/2
# width bin_form override with limits bin_to
factor[m] = (np.min(np.array( (wbin2,lam2) )) - np.max(np.array((wbin1 ,lam1))))/ (wbin2-wbin1)
if chatter > 2 :
print(' ... m = ',m,' bin= ',wbin1,' - ',wbin2)
print(' ... trimmed ',np.min(np.array( (wbin2,lam2) )),' - ',np.max(np.array((wbin1 ,lam1))))
new_countrate[i] = (factor * countrate[k[0]]).sum()
new_error[i] = np.sqrt( ( (factor * error[k[0]])**2 ).sum() )
if chatter > 2:
print(' scaled factor = ', factor)
print(' new_countrate = ', new_countrate[i])
#
# check that the total number of counts is the same
print('total counts in = ', countrate.sum())
print('total counts out= ', new_countrate.sum())
#
return lamda_response, new_countrate, new_error
def find_zeroth_orders(filestub, ext, wheelpos, region=False,indir='./',
set_maglimit=None, clobber="NO", chatter=0):
'''
The aim is to identify the zeroth order on the grism image.
This is done as follows:
We run uvotdetect to get the zeroth orders in the detector image.
We also grab the USNO B1 source list and predict the positions on the image using the WCSS header.
Based on a histogram of minimum distances, a correction is made to the WCSS header and
also to the USNO-B1 predicted positions.
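Example (illustrative only; requires HEASOFT/CALDB to be initialised and the grism
detector image <indir>/<filestub>ugu_dt.img to be present; the observation stub
below is made up):
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = find_zeroth_orders(
'sw00032000001', 1, 160, indir='./uvot/image/', chatter=1)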
'''
import os
try:
from astropy.io import fits, ascii
except:
import pyfits as fits
from numpy import array, zeros, log10, where
import datetime
import uvotwcs
from astropy import wcs
if chatter > 0:
print("find_zeroth_orders: determining positions zeroth orders from USNO-B1")
if ((wheelpos == 160) ^ (wheelpos == 200)):
grtype = "ugu"
zp = 19.46 # zeropoint uv nominal zeroth orders for 10 arcsec circular region
else:
grtype = "ugv"
zp = 18.90 # estimated visible grism zeropoint for same
exts = repr(ext)
gfile = os.path.join(indir,filestub+grtype+"_dt.img")
infile = os.path.join(indir,filestub+grtype+"_dt.img["+exts+"]")
outfile = os.path.join(indir,filestub+grtype+"_"+exts+"_detect.fits")
# the same uvotdetect parameters are used for the uv and visible grisms
command = "uvotdetect infile="+infile+ " outfile="+outfile + \
' threshold=6 sexargs = "-DEBLEND_MINCONT 0.1" '+ \
" expopt = BETA calibrate=NO expfile=NONE "+ \
" clobber="+clobber+" chatter=0 > /dev/null"
if chatter > 1:
print("find_zeroth_orders: trying to detect the zeroth orders in the grism image")
print(command)
useuvotdetect = True
tt = os.system(command)
if tt != 0:
raise RuntimeError('find_zeroth_orders: uvotdetect had a problem with this image\nIs HEASOFT initialised?')
if not os.access(outfile,os.F_OK):
# so you can provide it another way
useuvotdetect = False
rate = 0
if useuvotdetect:
f = fits.open(outfile)
g = f[1].data
h = f[1].header
refid = g.field('refid')
rate = g.field('rate')
rate_err = g.field('rate_err')
rate_bkg = g.field('rate_bkg') # counts/sec/arcsec**2
x_img = g.field('ux_image')
y_img = g.field('uy_image')
a_img = g.field('ua_image') # semi axis
b_img = g.field('ub_image') # semi axis
theta = g.field('utheta_image') # angle of the detection ellipse
prof_major = g.field('prof_major')
prof_minor = g.field('prof_minor')
prof_theta = g.field('prof_theta')
threshold = g.field('threshold') # sigma
flags = g.field('flags')
f.close()
else:
rate_bkg = array([0.08])
hh = fits.getheader(gfile, ext)
exposure = hh['exposure']
ra = hh['RA_PNT']
dec = hh['DEC_PNT']
if "A_ORDER" in hh:
distortpresent = True
else:
distortpresent = False
if chatter > 1:
print("find_zeroth_orders: pointing position ",ra,dec)
# unfortunately uvotdetect will pick up spurious stuff as well near the spectra
# need real sources.
# get catalog sources (B magnitude most closely matches zeroth order)
CALDB = os.getenv('CALDB')
if CALDB == '':
print('find_zeroth_orders: the CALDB environment variable has not been set')
return None
HEADAS = os.getenv('HEADAS')
if HEADAS == '':
print('find_zeroth_orders: The HEADAS environment variable has not been set')
print('That is needed for the uvot Ftools ')
return None
if set_maglimit == None:
b_background = zp + 2.5*log10( (rate_bkg.std())*1256.6 )
# some typical measure for the image
blim= b_background.mean() + b_background.std() + zeroth_blim_offset
else:
blim = set_maglimit
if blim < background_source_mag: blim = background_source_mag
if np.isnan(blim): blim = 18
# if usno-b1 catalog is present for this position,
# do not retrieve again
if os.access('searchcenter.ub1',os.F_OK):
searchcenterf = open( 'searchcenter.ub1' )
searchcenter= searchcenterf.readline().split(',')
searchcenterf.close()
racen,decen = float(searchcenter[0]),float(searchcenter[1])
if np.abs(ra-racen) + np.abs(dec-decen) < 0.01:
use_previous_search = True
else:
use_previous_search = False
else:
use_previous_search = False
# empty file
if os.access('search.ub1',os.F_OK) :
searchf = open('search.ub1')
stab = searchf.readlines()
searchf.close()
if len(stab) < 3: use_previous_search = False
# retrieve catalog data
if (not os.access('search.ub1',os.F_OK)) | (not use_previous_search):
if (chatter > 4): print ("get_usnob1_cat(%f,%f,%f)"%(ra,dec,blim))
status = get_usnob1_cat(ra, dec, blim)
if status is None:
print('ra={}, dec={}, blim={}'.format(ra, dec, blim))
print("find_zeroth_orders: could not get source list from USNO-B1")
sys.exit()
else:
if chatter > 1:
print("find_zeroth_orders: using the USNO-B1 source list from file search.ub1")
# generate a new catspecfile
_write_catspecfile()
# remove reliance on astropy tables as it fails on debian linux
searchf = open('search.ub1')
stab = searchf.readlines()
searchf.close()
M = len(stab)
ra = []
dec = []
b2mag = []
for row in stab:
row_values = row.split()
if len(row_values) > 6:
ra.append(row_values[1])
dec.append(row_values[2])
b2mag.append(row_values[5])
M = len(ra)
if M == 0:
return
ra = np.asarray(ra,dtype=np.float64)
dec = np.asarray(dec,dtype=np.float64)
b2mag = np.asarray(b2mag,dtype=np.float64)
Xa = zeros(M)
Yb = zeros(M)
Thet= zeros(M)
ondetector = zeros(M,dtype=bool)
matched = zeros(M,dtype=bool)
# now find the image coordinates:
#
wcsS = wcs.WCS(header=hh,key='S',relax=True,) # TAN-SIP coordinate type
Xim,Yim = wcsS.wcs_world2pix(ra,dec,0)
xdim, ydim = hh['naxis1'],hh['naxis2']
wheelpos = hh['wheelpos']
if wheelpos == 200:
q1 = (rate > 2.5*rate_bkg) & (rate < 125*rate_bkg)
defaulttheta = 151.4-180.
bins = np.arange(-29.5,29.5,1)
midbin = np.arange(-29,29,1)
elif wheelpos == 160:
q1 = (rate > 2.5*rate_bkg) & (rate < 125*rate_bkg) & (x_img > 850)
defaulttheta = 144.4-180.
bins = np.arange(-29.5,29.5,1)
midbin = np.arange(-29,29,1)
elif wheelpos == 955:
q1 = (rate > 2.5*rate_bkg) & (rate < 175*rate_bkg) & (x_img > 850)
defaulttheta = 140.5-180
bins = np.arange(-49.5,49.5,1)
midbin = np.arange(-49,49,1)
elif wheelpos == 1000:
q1 = (rate > 2.5*rate_bkg) & (rate < 175*rate_bkg)
defaulttheta = 148.1-180.
bins = np.arange(-49.5,49.5,1)
midbin = np.arange(-49,49,1)
Thet -= defaulttheta
Xa += 17.0
Yb += 5.5
# convert sky coord. to positions (Xim , Yim) , and set flag ondetector
for i in range(M):
if not distortpresent:
# now we need to apply the distortion correction:
Xim[i], Yim[i] = uvotwcs.correct_image_distortion(Xim[i],Yim[i],hh)
ondetector[i] = ((Xim[i] > 8) & (Xim[i] < xdim) & (Yim[i] > 8) & (Yim[i] < ydim-8))
xoff = 0.0
yoff = 0.0
# derive offset :
# find the minimum distances between sources in lists pair-wise
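# (each uvotdetect source is matched to the nearest predicted USNO-B1 position;
#  the x and y separations of the accepted pairs are histogrammed below and the
#  mode of each histogram is adopted as the offset xoff,yoff between image and catalogue)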
distance = []
distx = []
disty = []
kx = -1
dxlim = 100 # maximum distance in X
dylim = 100 # maximum distance in Y
tol = 5 # tolerance in x and y match
xim = x_img[q1]
yim = y_img[q1]
M2 = int(len(xim)*0.5)
for i2 in range(M2): # loop over the xdetect results
i = 2*i2
i1 = 2*i2+1
if (ondetector[i] and useuvotdetect):
dx = np.abs(Xim - xim[i ])
dy = np.abs(Yim - yim[i ])
dx1 = np.abs(Xim - xim[i1])
dy1 = np.abs(Yim - yim[i1])
op = (dx < dxlim) & (dy < dylim)
if op.sum() != 0:
dis = np.sqrt(dx[op]**2+dy[op]**2)
kx = dis == np.min(dis)
kx = np.arange(len(op))[op][kx]
op1 = (dx1 < dxlim) & (dy1 < dylim)
if op1.sum() != 0:
dis = np.sqrt(dx1[op1]**2+dy1[op1]**2)
kx1 = dis == np.min(dis)
kx1 = np.arange(len(op1))[op1][kx1]
if (np.abs(dx[kx] - dx1[kx1]) < tol ) & (np.abs(dy[kx] - dy1[kx1]) < tol ):
distx.append( Xim[kx] - xim[i ] )
disty.append( Yim[kx] - yim[i ] )
distx.append( Xim[kx1] - xim[i1] )
disty.append( Yim[kx1] - yim[i1] )
if ((type(kx) == int) & (chatter > 3)):
print("Xim: ",Xim[kx])
print("xim:",xim)
print("dx: ",dx)
if len(distx) > 0 :
hisx = np.histogram(distx,bins=bins)
#xoff = hisx[1][:-1][hisx[0] == hisx[0].max()].mean()
xoff = midbin[hisx[0] == hisx[0].max()].mean()
hisy = np.histogram(disty,bins=bins)
#yoff = hisy[1][:-1][hisy[0] == hisy[0].max()].mean()
yoff = midbin[hisy[0] == hisy[0].max()].mean()
# subtract xoff, yoff from Xim, Yim or add to origin ( hh[CRPIX1S],hh[CRPIX2S] ) if offset
# is larger than 1 pix
if (np.sqrt(xoff**2+yoff**2) > 1.0):
if ("forceshi" not in hh):
hh['crpix1s'] += xoff
hh['crpix2s'] += yoff
hh["forceshi"] = "%f,%f"%(xoff,yoff)
hh["forcesh0"] = "%f,%f"%(xoff,yoff)
print("offset (%5.1f,%5.1f) found"%(xoff,yoff))
print("offset found has been applied to the fits header of file: %s\n"%(gfile))
else:
# do not apply shift to crpix*s for subsequent shifts, but record overall shift
# original shift is in "forcesh0" which actually WAS applied. Both items are needed
# to reconstruct shifts between pointing image and the source locations (in case
# we allow interactive adjustments of zeroth orders, that would enable pointing updates
# however, the keyword must be reset at start of reprocessing (not done now)
xoff_,yoff_ = np.array((hh["forceshi"]).split(','),dtype=float)
hh["forceshi"] = "%f,%f"%(xoff_+xoff,yoff_+yoff)
f = fits.open(gfile,mode='update')
f[ext].header = hh
f.close()
print("find_zeroth_orders result (binary matched offset): \n")
print("\tAfter comparing uvotdetect zeroth order positions to USNO-B1 predicted source positions ")
print("\tthere was found an overall offset equal to (%5.1f.%5.1f) pix "%(xoff,yoff))
Xim -= xoff
Yim -= yoff
else:
# if binary matched offsets don't pan out at all, compute simple offsets
for i in range(len(xim)): # loop over the xdetect results
if (ondetector[i] and useuvotdetect):
dx = np.abs(Xim - xim[i ])
dy = np.abs(Yim - yim[i ])
op = (dx < dxlim) & (dy < dylim)
if op.sum() != 0:
dis = np.sqrt(dx[op]**2+dy[op]**2)
kx = dis == np.min(dis)
kx = np.arange(len(op))[op][kx]
distx.append( Xim[kx] - xim[i ] )
disty.append( Yim[kx] - yim[i ] )
hisx = np.histogram(distx,bins=bins)
#xoff = hisx[1][hisx[0] == hisx[0].max()].mean()
xoff = midbin[hisx[0] == hisx[0].max()].mean()
hisy = np.histogram(disty,bins=bins)
#yoff = hisy[1][hisy[0] == hisy[0].max()].mean()
yoff = midbin[hisy[0] == hisy[0].max()].mean()
if (np.sqrt(xoff**2+yoff**2) > 1.0):
if ("forceshi" not in hh):
hh['crpix1s'] += xoff
hh['crpix2s'] += yoff
hh["forceshi"] = "%f,%f"%(xoff,yoff)
hh["forcesh0"] = "%f,%f"%(xoff,yoff)
print("offset (%5.1f,%5.1f) found"%(xoff,yoff))
print("offset found has been applied to the fits header of file: %s\n"%(gfile))
else:
# do not apply shift to crpix*s for subsequent shifts, but record overall shift
# original shift is in "forcesh0" which actually WAS applied. Both items are needed
# to reconstruct shifts between pointing image and the source locations (in case
# we allow interactive adjustments of zeroth orders, that would enable pointing updates
# however, the keyword must be reset at start of reprocessing (not done now)
xoff_,yoff_ = np.array((hh["forceshi"]).split(','),dtype=float)
hh["forceshi"] = "%f,%f"%(xoff_+xoff,yoff_+yoff)
f = fits.open(gfile,mode='update')
f[ext].header = hh
f.close()
print("find_zeroth_orders result (simple offset): \n")
print("\tAfter comparing uvotdetect zeroth order positions to USNO-B1 predicted source positions ")
print("\tthere was found an overall offset equal to (%5.1f.%5.1f) pix "%(xoff,yoff))
Xim -= xoff
Yim -= yoff
# find ellipse belonging to source from uvotdetect output, or make up one for all ondetector
xacc = 10
yacc = 6
for i in range(M):
if (ondetector[i] and useuvotdetect):
kx = where ( abs(Xim[i] - x_img) < xacc )
if len(kx[0]) != 0:
kxy = where( abs(Yim[i] - y_img[kx]) < yacc)
if len(kxy[0]) == 1:
k = kx[0][kxy[0][0]]
Xa[i] = prof_major[k]*5.
Yb[i] = prof_minor[k]*5.
Thet[i]= -theta[k]
matched[i] = True
else:
# make up some ellipse axes in pix
Xa[i] = 17.0
Yb[i] = 5.0
if chatter > 0:
print("find_zeroth_orders: there were %i matches found between the uvotdetect sources and the USNO B1 list"%(matched.sum()))
if region:
a = datetime.date.today()
datestring = a.isoformat()[0:4]+a.isoformat()[5:7]+a.isoformat()[8:10]
# make region file for sources on detector
f = open(filestub+'_'+exts+'.reg','w')
f.write('# Region file format: DS9 version 4.1\n')
#f.write('# written by uvotgetspec.findzerothorders python program '+datestring+'\n')
f.write('# Filename: '+infile+'\n')
f.write('global color=green dashlist=8 3 width=1 font="helvetica 10 normal" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1 \n')
f.write('physical\n')
for i in range(M):
if (ondetector[i] and useuvotdetect):
f.write('ellipse(%12.2f,%12.2f,%12.2f,%12.2f,%12.2f)\n' % (Xim[i],Yim[i],Xa[i],Yb[i],180.-Thet[i]) )
f.close()
# make a second region file for sources with first order on detector [TBD]
# the sources on the detector are Xim[ondetector] etc.,
# matched[ondetector] are those sources which have both been found by uvotdetect and in the catalog
# the complete list also includes sources off the detector which may have first orders on the
# detector when the B magnitude > ~14.
# the ellipse parameters for the sources which have no uvotdetection (matched=False) are some
# arbitrary mean values. They should be scaled to brightness.
return Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector
def spec_curvature(wheelpos,anchor,order=1,):
'''Find the coefficients of the polynomial for the curvature.
Parameters
----------
wheelpos : int, {160,200,955,1000}
grism filter position in filter wheel
anchor : list, array
anchor position in detector coordinates (pixels)
order : int
the desired spectral order
Returns
-------
Provides the polynomial coefficients for y(x).
Notes
-----
The curvature is defined as a function of the pixel coordinate in the dispersion
direction, with reference to the anchor coordinates in det-img coordinates.
The polynomial returns the offset normal to the dispersion.
- 2011-03-07 <NAME>, initial version
- 2011-08-02 fixed nominal coefficients order=1
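Example
-------
Sketch of typical use (the anchor position is made up)::

  import numpy as np
  coef1 = spec_curvature(160, [1100., 1020.], order=1)
  dx = np.arange(-370, 1150)       # pixels along the dispersion, relative to the anchor
  dy = np.polyval(coef1, dx)       # offset normal to the dispersion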
'''
from scipy import interpolate
from numpy import array
xin = anchor[0] -104
yin = anchor[1] -78
if ((wheelpos == 1000) ^ (wheelpos == 955)):
# return y = 0 + 0.0*x coefficient
return array([0.,0.])
elif wheelpos == 160:
if order == 1:
tck_c1= [array([0.,0.,0.,0.,2048., 2048., 2048., 2048.]), \
array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]), \
array([ 0.1329227 , -0.28774943, 0.13672294, -0.18436127, -0.19086855,\
0.23071908, -0.21803703, 0.11983982, 0.16678715, -0.2004285 ,\
0.12813155, -0.13855324, -0.1356009 , 0.11504641, -0.10732287,\
0.03374111]),3,3]
tck_c2 = [array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]),\
array([0.,0.,0.,0., 2048., 2048., 2048., 2048.]),\
array([ -3.17463632e-04, 2.53197376e-04, -3.44611897e-04,\
4.81594388e-04, 2.63206764e-04, -3.03314305e-04,\
3.25032065e-04, -2.97050826e-04, -3.06358032e-04,\
3.32952612e-04, -2.79473410e-04, 3.95150704e-04,\
2.56203495e-04, -2.34524716e-04, 2.75320861e-04,\
-6.64416547e-05]),3,3]
tck_c3 = [array([ 0.,0.,0.,0.,2048., 2048., 2048., 2048.]),\
array([ 0.,0.,0.,0.,2048., 2048., 2048., 2048.]),\
array([ -4.14989592e-07, 5.09851884e-07, -4.86551197e-07,\
1.33727326e-07, 4.87557866e-07, -5.51120320e-07,\
5.76975007e-07, -3.29793632e-07, -3.42589204e-07,\
3.00002959e-07, -2.90718693e-07, 5.57782883e-08,\
2.20540397e-07, -1.62674045e-07, 8.70230076e-08,\
-1.13489556e-07]),3,3]
#coef = array([interpolate.bisplev(xin,yin,tck_c3),interpolate.bisplev(xin,yin,tck_c2),\
# interpolate.bisplev(xin,yin,tck_c1), 0.])
coef = array([interpolate.bisplev(xin,yin,tck_c3)*0.5,interpolate.bisplev(xin,yin,tck_c2)*0.5,\
interpolate.bisplev(xin,yin,tck_c1)*0.5, 0.]) #~FIXME:
return coef
elif order == 2:
tck_c0 = [array([ 0., 0., 0., 0., 1134.78683, 2048., 2048., 2048., 2048.]), \
array([ 0., 0., 0., 0., 871.080060, 2048., 2048., 2048., 2048.]), \
array([-110.94246902, 15.02796289, -56.20252149, -12.04954456,\
311.31851187, -31.09148174, -48.44676102, 85.82835905,\
-73.06964994, 99.58445164, 46.47352776, 11.29231744,\
-68.32631894, 88.68570087, -34.78582366, -33.71033771,\
6.89774103, 25.59082616, 23.37354026, 49.61868235,\
-438.17511696, -31.63936231, 28.8779241 , 51.03055925,\
16.46852299]), 3, 3]
tck_c1 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0.52932582, -0.76118033, 0.38401924, -0.189221 , -0.45446129,\
0.73092481, -0.53433133, 0.12702548, 0.21033591, -0.45067611,\
0.32032545, -0.25744487, -0.06022942, 0.22532666, -0.27174491,\
0.03352306]), 3, 3]
tck_c2 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ -4.46331730e-04, 3.94044533e-04, -1.77072490e-04,\
2.09823843e-04, 3.02872440e-04, -6.23869655e-04,\
5.44400661e-04, -3.70038727e-04, -1.60398389e-04,\
4.90085648e-04, -4.91436626e-04, 4.62904236e-04,\
4.05692472e-05, -2.34521165e-04, 3.04866621e-04,\
-1.25811263e-04]), 3, 3]
#tck_c0 = [array([0.,0., 1132.60995961, 2048.,2048.]),
# array([0.,0., 814.28303687, 2048.,2048.]),
# array([-49.34868162, -0.22692399, -11.06660953, 5.95510567,
# -3.13109456, 37.63588808, -38.7797533 , 24.43177327, 43.27243297]),1,1]
#tck_c1 = [array([ 0., 0., 2048., 2048.]),
# array([ 0., 0., 2048., 2048.]),
# array([ 0.01418938, -0.06999955, -0.00446343, -0.06662488]),1,1]
#tck_c2 = [array([ 0., 0., 2048., 2048.]),
# array([ 0., 0., 2048., 2048.]),
# array([ -9.99564069e-05, 8.89513468e-05, 4.77910984e-05, 1.44368445e-05]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c2),interpolate.bisplev(xin,yin,tck_c1),\
interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 3:
# not a particularly good fit.
tck_c0 = [array([0., 0., 1101.24169141, 2048.,2048.]),
array([0., 0., 952.39879838, 2048.,2048.]),
array([ -74.75453915, 7.63095536, -131.36395787, 11.14709189,
-5.52089337, 73.59327202, -57.25048374, 37.8898465 ,
65.90098406]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([-0.04768498, -0.02044308, 0.02984554, -0.04408517]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 0:
tck_c0 = [array([ 0., 0., 1075.07521348, 2048. ,2048.]),
array([ 0., 0., 1013.70915889, 2048. ,2048.]),
array([ 130.89087966, 25.49195385, 5.7585513 , -34.68684878,
-52.13229007, -168.75159696, 711.84382717, -364.9631271 ,
374.9961278 ]),1,1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.08258587, -0.06696916, -0.09968132, -0.31579981]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
else:
raise (ValueError)
elif wheelpos == 200:
if order == 1:
tck_c1 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([-0.00820665, -0.06820851, 0.04475057, -0.06496112, 0.062989 , \
-0.05069771, -0.01397332, 0.03530437, -0.17563673, 0.12602437,\
-0.10312421, -0.02404978, 0.06091811, -0.02879142, -0.06533121,\
0.07355998]), 3, 3]
tck_c2 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 1.69259046e-04, -1.67036380e-04, -9.95915869e-05, \
2.87449321e-04, -4.90398133e-04, 3.27190710e-04, \
2.12389405e-04, -3.55245720e-04, 7.41048332e-04, \
-4.68649092e-04, -1.11124841e-04, 6.72174552e-04, \
-3.26167775e-04, 1.15602175e-04, 5.78187743e-04, \
-8.79488201e-04]), 3, 3]
tck_c3 = [array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 0., 0., 0., 0., 2048., 2048., 2048., 2048.]),\
array([ 1.11106098e-07, 2.72305072e-07, -7.24832745e-07,\
4.65025511e-07, -2.35416547e-07, -3.87761080e-07,\
1.05955881e-06, -6.46388216e-07, 3.15103869e-07,\
5.48402086e-07, -1.44488974e-06, 6.52867676e-07,\
1.14004672e-08, -9.48879026e-07, 1.64082320e-06,\
-8.07897628e-07]), 3, 3]
# the linear fit fails at the right side (57020002) but is quite good otherwise:
#tck_c1 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]),\
# array([-0.02212781, -0.00873168, -0.00377861, -0.02478484]), 1, 1]
#
#tck_c2 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]),\
# array([ -6.75189230e-05, 6.19498966e-05, 5.22322103e-05, 7.75736030e-05]), 1, 1]
#
#tck_c3 = [array([ 0., 0., 2048., 2048.]), array([ 0., 0., 2048., 2048.]), \
# array([ -1.75056810e-09, -3.61606998e-08, -6.00321832e-09, -1.39611943e-08]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c3),interpolate.bisplev(xin,yin,tck_c2),\
interpolate.bisplev(xin,yin,tck_c1), 0.])
return coef
elif order == 2:
tck_c0 = [array([0.,0., 956.25596245, 2048.,2048.]),
array([0.,0., 1067.40622524, 2048.,2048.]),
array([ 17.82135471, -4.93884392, 20.55439437, -18.22869669,
13.11429182, 41.2680039 , 9.8050793 , 32.72362507, -6.56524782]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.02362119, -0.03992572, 0.0177935 , -0.10163929]),1, 1]
tck_c2 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ -6.32035759e-05, 5.28407967e-05, -8.87338917e-06, 8.58873870e-05]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c2),interpolate.bisplev(xin,yin,tck_c1),\
interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 3:
tck_c0 = [array([ 0. , 0. , 807.44415249, 2048.,2048.]),
array([ 0. , 0. , 1189.77686531, 2048.,2048.]),
array([-5436.10353688, 218.93823252, -254.71035527, -24.35684969,
23.26131493, 51.66273635, 37.89898456, 46.77095978,
63.22039872]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([-0.02591263, -0.03092398, 0.00352404, -0.01171369]), 1, 1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
elif order == 0:
tck_c0 = [array([0.,0., 798.6983833, 2048., 2048.]),
array([0.,0., 1308.9171309, 2048., 2048.]),
array([ 1244.05322027, 24.35223956, -191.8634177 , -170.68236661,
-4.57013926, 20.35393124, -365.28237355, -235.44828185, -2455.96232688]), 1, 1]
tck_c1 = [array([ 0., 0., 2048., 2048.]),
array([ 0., 0., 2048., 2048.]),
array([ 0.54398146, -0.04547362, -0.63454342, -0.49417562]),1,1]
coef = array([interpolate.bisplev(xin,yin,tck_c1),interpolate.bisplev(xin,yin,tck_c0)])
return coef
else:
raise (ValueError)
else:
raise ValueError('spec_curvature: illegal wheelpos value %s'%(wheelpos))
def get_coi_box(wheelpos):
# provide half-width, length coi-box and factor
# typical angle spectrum varies with wheelpos
# 29,27,31,28 3x8/cos([144.5,151.4,140.5,148.1]) for wheelpos = 160,200,955,1000
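# e.g. get_coi_box(160) returns (7.5, 29, 1.11): 7.5 pix half-width, 29 pix length,
# and the coi area factor 1.11 for the clocked uv grism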
coistuff = {'160':(7.5,29,1.11),
'200':(7.5,27,1.12),
'955':(6.5,31,1.09),
'1000':(7.0,28,1.13),}
return coistuff[str(wheelpos)]
def curved_extraction(extimg,ank_c,anchor1, wheelpos, expmap=None, offset=0., \
anker0=None, anker2=None, anker3=None, angle=None, offsetlimit=None, \
background_lower=[None,None], background_upper=[None,None],background_template=None,\
trackonly=False, trackfull=False, caldefault=True, curved="noupdate", \
poly_1=None,poly_2=None,poly_3=None, set_offset=False, \
composite_fit=True, test=None, chatter=0, skip_field_sources=False,\
predict_second_order=True, ZOpos=None,outfull=False, msg='',\
fit_second=True,fit_third=True,C_1=None,C_2=None,dist12=None, ifmotion=True,\
dropout_mask=None,obsid=None,indir=None,motion_file=None,ank_c_0offset=False,ifextended=False,fixwidth=False):
'''This routine knows about the curvature of the spectra in the UV filters
can provide the coefficients of the tracks of the orders
can provide a gaussian fit to the orders
extimg = extracted image
ank_c = array( [ X pos anchor, Y pos anchor, start position spectrum, end spectrum]) in extimg
anchor1 = anchor position in original image in det coordinates
wheelpos = filter wheel position
ZOpos variables defining Zeroth Order positions
angle [req with ZOpos]
background_template - if provided, the background will be based on this
dropout_mask from extractSpecImg
override curvature polynomial coefficients with poly_1,poly_2,poly_3
i.e., after a call to updateFitorder()
outputs a new array with the sum over a fixed number of pixels across the spectrum
for the coincidence-loss correction; the width of that box is set by coi_half_width
NPMK, 2010-07-09 initial version
2012-02-20 There was a problem with the offset/track y1 position/borderup,borderdown consistency
when using a prescribed offset. Changing handling. Always make a fine yank adjustment < 3 pix.
disabled for now the set_offset (it does not do anything).
2012-02-20 moved the call to updateFitorder() to curved_extraction. The result is that the
spectrum will be extracted using the updated track parameters.
2014-06-02 add support for fixed box extraction coincidence loss.
2014-08-04 add parameter curved_extraction to limit y-positioning extraction slit with list option
2014-08-06 changed code to correctly adjust y1 position
2014-08-25 fixed error in curve of location orders except first one
2016-01-17 trackcentroiding parameter added to disable centroiding
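The principal output *fitorder* is the nested tuple (see the end of this routine)
(present0,present1,present2,present3), (q0,q1,q2,q3),
(y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),
(y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),
(y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),
(y3,dlim3L,dlim3U,sig3coef,sp_third, co_third ),
(x,xstart,xend,sp_all,quality,co_back)
and with outfull=True the cross-dispersion profile, the curvature coefficients,
the background per order, the track borders, the aperture correction, the
exposure per order, the message string and the curvature mode are returned as well.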
'''
import pylab as plt
from numpy import array, arange, where, zeros, ones, asarray, abs   # the builtin int is used for integer casts
from uvotplot import plot_ellipsoid_regions
import uvotmisc
anky,ankx,xstart,xend = ank_c
xstart -= ankx
xend -= ankx
anchor2 = anchor1
if test == 'cal':
from cal3 import get_1stOrderFit, get_2ndOrderFit ,get_3rdOrderFit, get_0thOrderFit
from cal3 import nominaluv, clockeduv
if wheelpos == 160:
curves = clockeduv
elif wheelpos == 200:
curves = nominaluv
else:
print("use straight extraction for V grism modes")
return
if wheelpos > 300:
return
# coincidence loss box
coi_half_width,coilength,coifactor = get_coi_box(wheelpos)
# read the table of coefficients/get the coefficients of the Y(dis) offsets and limits[]
# stored with array of angles used.
# ZEROTH ORDER CURVATURE
if test == 'notyetcal':
coef0 = get_0thOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef0 = spec_curvature(wheelpos,anchor2,order=0)
dlim0L=-820
dlim0U=-570
present0=True
if (xstart > dlim0U):
present0=False
coef0 = array([0.,0.])
if (xstart > dlim0L): dlim0L = xstart
# FIRST ORDER CURVATURE
if test == 'cal':
coef1 = get_1stOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef1 = spec_curvature(wheelpos,anchor2,order=1)
#coef1[0] = -3.08e-9
#coef1[1] = 5.89e-6
#coef1[2] = -9.21e-3
dlim1L=-400
dlim1U=1150
present1=True
if (xstart > dlim1L): dlim1L = xstart
if (xend < dlim1U): dlim1U = xend
# SECOND ORDER CURVATURE
if test == 'cal':
coef2 = get_2ndOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef2 = spec_curvature(wheelpos,anchor2,order=2)
dlim2L=25
dlim2U=3000
if (xstart > dlim2L): dlim2L = xstart
if (xend < dlim2U): dlim2U = xend
if (xend > dlim2L):
present2=True
else: present2=False
# THIRD ORDER CURVATURE
if test == 'cal':
coef3 = get_3rdOrderFit(xin=anchor2[0],yin=anchor2[1],curvedata=curves)
else:
coef3 = spec_curvature(wheelpos,anchor2,order=3)
dlim3L=425
dlim3U=3000
if (xstart > dlim3L): dlim3L = xstart
if (xend < dlim3U): dlim3U = xend
if (xend > dlim3L):
present3=True
else: present3=False
# good first approximation:
# if wheelpos == 160:
sig0coef=array([4.7])
sig1coef=array([-8.22e-09, 6.773e-04, 3.338])
#sig1coef=array([1.6*(-8.22e-09), 1.6*(6.773e-04), 1.6*3.338]) #~FIXME: try changing sigma
#sig1coef=array([ 3.0])
sig2coef=array([-5.44e-07, 2.132e-03, 3.662])
sig3coef=array([0.0059,1.5])
# override coefficients y(x):
print ("DEBUG 3431 type coef1 is ", type(coef1) )
print ("DEBUG 3432 type poly_1 is ",type(poly_1))
if (type(poly_1) != typeNone): coef1 = poly_1
if (type(poly_2) != typeNone): coef2 = poly_2
if (type(poly_3) != typeNone): coef3 = poly_3
#===================================================================
if chatter > 0:
print('================== curvature fits for y ==============')
print('zeroth order poly: ',coef0)
print('first order poly: ',coef1)
print('second order poly: ',coef2)
print('third order poly: ',coef3)
print('======================================================')
#===================================================================
# remove background
#if cval == None: cval = out_of_img_val = -1.0123456789 cval now global
if chatter > 3 : print ("DEBUG 3453 remove background")
bg, bg1, bg2, bgsig, bgimg, bg_limits, \
(bg1_good, bg1_dis, bg1_dis_good, bg2_good, bg2_dis, bg2_dis_good, bgimg_lin) \
= findBackground(extimg,background_lower=background_lower,
background_upper=background_upper,yloc_spectrum=anky, chatter=2)
if background_template is not None:
bgimg = background_template['extimg']
spimg = extimg - bgimg
ny,nx = spimg.shape
# initialise quality array, exposure array for spectrum and flags
quality = zeros(nx,dtype=int)
expospec = zeros(5*nx,dtype=int).reshape(5,nx)
qflag = quality_flags()
# get the mask for zeroth orders in the way
if chatter > 3 : print ("DEBUG 3470 get mask zeroth orders ")
# set bad done while extracting spectra below
set_qual = ((not skip_field_sources) & (ZOpos is not None) & (angle is not None))
if set_qual:
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
# find_zeroth_orders(filestub, ext, wheelpos,clobber="yes", )
dims = array([nx,ny])
pivot_ori=array([(anchor1)[0],(anchor1)[1]])
pivot= array([ank_c[1],ank_c[0]])
# map down to 18th magnitude in B2 (use global variable uvotgetspec.background_source_mag)
m_lim = background_source_mag
map_all = plot_ellipsoid_regions(Xim.copy(),Yim.copy(),Xa.copy(),Yb.copy(),Thet.copy(),\
b2mag.copy(),matched.copy(), ondetector,pivot,pivot_ori,dims,m_lim,img_angle=angle-180.0,\
lmap=True,makeplot=False,chatter=chatter)
if chatter > 2:
print("zeroth order map all: shape=",map_all.shape," min, max =",map_all.min(), map_all.max())
# map down to 16th magnitude in B2
m_lim = 16.0
map_strong = plot_ellipsoid_regions(Xim.copy(),Yim.copy(),Xa.copy(),Yb.copy(),Thet.copy(),\
b2mag.copy(),matched.copy(), ondetector,pivot,pivot_ori,dims,m_lim,img_angle=angle-180.0,\
lmap=True,makeplot=False,chatter=chatter)
if chatter > 2:
print("zeroth order map strong: shape=",map_strong.shape," min, max =",map_strong.min(), map_strong.max())
# tracks - defined as yi (delta) = 0 at anchor position (ankx,anky)
if chatter > 3 : print ("DEBUG 3500 set up y arrays ")
# shift to first order anchor
x = array(arange(nx))-ankx
y = zeros(nx)+anky
y0 = zeros(nx)+anky - polyval(coef1,0)
y1 = zeros(nx)+anky - polyval(coef1,0)
y2 = zeros(nx)+anky - polyval(coef1,0)
y3 = zeros(nx)+anky - polyval(coef1,0)
q0 = where((x >= dlim0L) & (x <= dlim0U))
x0 = x[q0]
if present0: y0[q0] += polyval(coef0,x[q0])
q1 = where((x >= dlim1L) & (x <= dlim1U))
x1 = x[q1]
if present1: y1[q1] += polyval(coef1,x[q1])
q2 = where((x >= dlim2L) & (x <= dlim2U))
x2 = x[q2]
if present2: y2[q2] += polyval(coef2,x[q2])
q3 = where((x >= dlim3L) & (x <= dlim3U))
x3 = x[q3]
if present3: y3[q3] += polyval(coef3,x[q3])
if trackcentroiding: # global (default = True)
if chatter > 3 : print ("DEBUG 3522 centroid track")
# refine the offset by determining where the peak in the
# first order falls.
# We NEED a map to exclude zeroth orders that fall on/near the spectrum
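# (cp2 below accumulates the counts in a strip of +/- delpix rows around the
#  predicted first-order track; each column is shifted by its own track offset
#  so that the cross-dispersion profile is aligned at the anchor row before the
#  gaussian fit further down)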
ny = int(ny)
cp2 = zeros(ny)
cp2_spimg = zeros(spimg.shape) #~TODO:
delpix = 50
if wheelpos == 200: delpix=25 # the accuracy for the nominal uv anchor is not as good.
offsetset = False
if type(offsetlimit) == list:
offsetval = offsetlimit[0]
delpix = array([abs(offsetlimit[1]),1],dtype=int).max() # at least 1
if offsetlimit[1] < 1.:
offsetset = True
else:
print('curved_extraction: offsetlimit=',offsetlimit,' delpix=',delpix)
eo = int(anky-slit_width/2)
if set_offset:
eo = int(offset-slit_width/2)
for q in q1[0]:
if ((x[q] < 600) & (x[q] > -200) & (quality[q] == 0)):
try:
m0 = 0.5*ny-delpix + eo #int( (ny+1)/4)
m1 = 0.5*ny+delpix + eo #int( 3*(ny+1)/4)+1
yoff = y1[q] - anky # this is just the offset from the anchor since y1[x=0] was set to anky
cp2[int(m0-yoff):int(m1-yoff)] += spimg[int(m0):int(m1),q].flatten()
cp2_spimg[int(m0-yoff):int(m1-yoff),q] += spimg[int(m0):int(m1),q].flatten()
except:
print("skipping slice %5i in adjusting first order y-position"%(q))
pass
fig = plt.figure()
plt.title(obsid)
#plt.show()
#print(np.sum(cp2_spimg[:,1632:1832],axis=1),len(np.sum(cp2_spimg[:,200:400],axis=1)))
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1032:1232],axis=1)/expmap[0],label='-200-0/1032-1232')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1232:1432],axis=1)/expmap[0],label='0-200/1232-1432')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1432:1632],axis=1)/expmap[0],label='200-400/1432-1632')
plt.plot(arange(slit_width),np.sum(cp2_spimg[:,1632:1832],axis=1)/expmap[0],label='400-600/1632-1832')
plt.legend()
plt.ylabel('count rate per bin')
plt.title(obsid)
plt.savefig(indir+'/'+obsid+'_wing.png')
#plt.show()
plt.close()
if offsetset:
yof = offsetval - anky
if chatter > 1:
print("spectrum location set with input parameter to: y=%5.1f"%(offsetval))
msg += "spectrum location set with input parameter to: y=%5.1f\n"%(offsetval)
else:
if ifmotion:
motion = abs(obsid2motion(obsid,motion_file)['V'])
(p0,p1,p2), ier = leastsq(Fun4, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width),motion) ) #~FIXME:
sigma_mean=np.mean(polyval(sig1coef,x))
#p3= motion
elif fixwidth:
(p0,p1,p2), ier = leastsq(Fun1, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width)) )
sigma_mean=fixwidth/trackwidth #np.mean(polyval(sig1coef,x))
times = sigma_mean/np.mean(polyval(sig1coef,x))
sig0coef = times*sig0coef
sig1coef = times*sig1coef
sig2coef = times*sig2coef
sig3coef = times*sig3coef
elif ifextended:
(p0,p1,p2), ier = leastsq(Fun1, (cp2.max(),anky,3.2), args=(cp2,arange(slit_width)) )
sigma_mean = p2
times = p2/np.mean(polyval(sig1coef,x))
#times = 1.
#sigma_mean = times*np.mean(polyval(sig1coef,x))
sig0coef = times*sig0coef
sig1coef = times*sig1coef
sig2coef = times*sig2coef
sig3coef = times*sig3coef
else:
(p0,p1), ier = leastsq(Fun1b, (cp2.max(),anky), args=(cp2,arange(slit_width),3.2) )
sigma_mean=np.mean(polyval(sig1coef,x))
#print(p0,p1,p2,p3,sigma_mean)
fig = plt.figure()
if ifmotion:
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),smeargaussian(arange(slit_width),p0,p1,sigma_mean,motion))
plt.vlines(p1-(trackwidth *sigma_mean+motion/2),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean+motion/2),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid+' motion:'+"%.2f"%motion)
elif fixwidth:
np.savetxt(indir+'/'+obsid+'_fit.txt',np.transpose(np.array([arange(slit_width),cp2])),delimiter=',',fmt='%.2f') #~FIXME:
with open(indir+'/'+obsid+'_fit.txt','r+') as f:
content = f.read()
f.seek(0,0)
f.write('A:'+f'{p0:.2f}'+' mu:'+f'{p1:.2f}'+' sigma:'+f'{p2:.2f}'+'\n'+content)
f.close()
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),singlegaussian(arange(slit_width),p0,p1,p2))
plt.vlines(p1-(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid)
else:
plt.plot(arange(slit_width),cp2)
plt.plot(arange(slit_width),singlegaussian(arange(slit_width),p0,p1,sigma_mean))
plt.vlines(p1-(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.vlines(p1+(trackwidth *sigma_mean),0,np.max(cp2),color='k')
plt.xlabel('y pixels')
plt.ylabel('total counts')
plt.title(obsid)
plt.savefig(indir+'/'+obsid+'_fit.png')
#plt.show()
plt.close()
yof = (p1-anky)
if ank_c_0offset == True:
yof = 0
if chatter > 1:
print("\n *** cross-spectrum gaussian fit parameters: ",p0,p1)
print("the first anchor fit with gaussian peaks at %5.1f, and the Y correction\nis %5.1f (may not be used)" % (p1,yof))
#### should also estimate the likely wavelength error from the offset distance p1 and print
#msg += "cross-spectrum gaussian fit parameters: (%5.1f ,%5.1f)\n" % (p0,p1)
#msg += "the first anchor fit with gaussian peaks at %5.1f, and the Y correction was %5.1f\n" % (p1,yof)
else:
set_offset = True
offsetset = False
# so now shift the location of the curves to match the first order uv part.
if set_offset:
# ignore computed offset and offsetlimit [,] but used passed offset argument
y0 += offset
y1 += offset
y2 += offset
y3 += offset
print("shifting the y-curve with offset passed by parameter")
else:
# assuming the relative position of the orders is correct, just shift the whole bunch
y0 += yof
y1 += yof
y2 += yof
y3 += yof
if not set_qual:
map = None
print("no zeroth order contamination quality information available ")
quality[:] = qflag['good']
# OUTPUT PARAMETER spectra, background, slit init - full dimension retained
if chatter > 3 : print ("DEBUG 3594 set up spectrum arrays ")
# initialize
sp_all = zeros(nx) + cval # straight slit
bg_all = zeros(nx) + cval # straight slit
# spectrum arrays
sp_zeroth = zeros(nx) + cval # curved extraction
sp_first = zeros(nx) + cval # curved extraction
sp_second = zeros(nx) + cval # curved extraction
sp_third = zeros(nx) + cval # curved extraction
bg_zeroth = zeros(nx) + cval # curved extraction
bg_first = zeros(nx) + cval # curved extraction
bg_second = zeros(nx) + cval # curved extraction
bg_third = zeros(nx) + cval # curved extraction
# coi-area arrays
co_zeroth = zeros(nx) + cval
co_first = zeros(nx) + cval
co_second = zeros(nx) + cval
co_third = zeros(nx) + cval
co_back = zeros(nx) + cval
# quality flag arrays
at1 = zeros(nx,dtype=bool)
at2 = zeros(nx,dtype=bool)
at3 = zeros(nx,dtype=bool)
apercorr = zeros(5*nx).reshape(5,nx) + cval
borderup = zeros(5*nx).reshape(5,nx) + cval
borderdown = zeros(5*nx).reshape(5,nx) + cval
fitorder = (present0,present1,present2,present3),(q0,q1,q2,q3),(
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third ),(
x,xstart,xend,sp_all,quality,co_back)
if trackonly: # output the coordinates on the extimg image which specify the lay of
# each order
if outfull:
return fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr #, expospec, msg, curved
else: return fitorder
if not trackfull:
if (curved == "update") & (not trackcentroiding):
# the hope is, that with more data the calibration can be improved to eliminate this step
#try:
fitorder2, fval, fvalerr = updateFitorder(extimg, fitorder, wheelpos, full=True,
predict2nd=predict_second_order, fit_second=fit_second, fit_third=fit_second,
C_1=C_1, C_2=C_2, d12=dist12, chatter=chatter)
msg += "updated the curvature and width fit parameters\n"
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third ),(
x,xstart,xend,sp_all,quality,co_back) = fitorder2
# update the anchor y-coordinate
ank_c[0] = y1[int(ank_c[1])]
#except:
# msg += "WARNING: fit order curvature update has failed\n"
# curved = "curve"
if offsetset & (not trackcentroiding):
mess = "%s\nWARNING Using offsetlimit with parameter *curved = 'update'* \n"\
"WARNING Therefore we updated the curvature, and besides the curvature, the\n"\
"Y-position of the extraction region was updated to y1[ankx]=%5.1f and \n"\
"does not equal the offsetlimit value of %5.1f \n%s"%(30*"=*=",
y1[int(ankx)],offsetlimit[0],30*"=*=")
print(mess)
mess = "Updated the curvature, and besides the curvature, the Y-position \n"\
" of the extraction region was updated to y1[ankx]=%5.1f and does\n"\
" not equal the offsetlimit value of %5.1f \n"%(y1[int(ankx)],offsetlimit[0])
msg += mess+"\n"
# default single track extraction
sphalfwid = 4.*sig1coef[0]
spwid = 2*sphalfwid
splim1 = int(slit_width/2+offset-sphalfwid+1)
splim2 = int(splim1 + spwid)
sp_all = extimg[splim1:splim2,:].sum(axis=0).flatten()
bg_all = bgimg[splim1:splim2,:].sum(axis=0).flatten()
borderup[4,:] = splim2
borderdown[4,:] = splim1
# background for coi-loss box - using a 3x larger sampling region
k1 = int(anky-3*coi_half_width+0.5)
co_back = bgimg[k1:k1+int(6*coi_half_width),:].sum(axis=0)/3.0
if present0:
for i in range(nx):
sphalfwid = trackwidth*polyval(sig0coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = splim1+y0[i]-anky
k1 = int(y0[i] - sphalfwid + 0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y0[i] - coi_half_width + 0.5)
k4 = k1 + int(2*coi_half_width)
if i in q0[0]:
co_zeroth[i] = extimg[k3:k4,i].sum()
sp_zeroth[i] = extimg[k1:k2,i].sum()
bg_zeroth[i] = bgimg[k1:k2,i].sum()
borderup[0,i] = k2
borderdown[0,i] = k1
apercorr[0,i] = x_aperture_correction(k1,k2,sig0coef,x[i],norder=0,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[0,i] = expmap[0]
else: expospec[0,i] = expmap[k1:k2,i].mean()
if present1:
#if ifmotion:
# apercorr_value = x_aperture_correction(0,0,sig1coef,100,norder=1,mode='gaussian',
# sigma=p2,motion=motion,tw=trackwidth,ifmotion=ifmotion)
for i in range(nx):
if ifmotion:
sphalfwid = trackwidth *polyval(sig1coef,x[i])+motion/2 #~FIXME:
else:
sphalfwid = trackwidth * polyval(sig1coef,x[i])
# if (x[i] < 30): sphalfwid *= bluetrackwidth
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = int(splim1+y1[i]-anky+0.5)
k1 = int(y1[i] - sphalfwid + 0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y1[i] - coi_half_width + 0.5)
k4 = k3 + int(2*coi_half_width) #--TODO:FIXME:
k5 = y1[i]
if i in q1[0]:
co_first[i] = extimg[k3:k4,i].sum()
sp_first[i] = extimg[k1:k2,i].sum()
bg_first[i] = bgimg[k1:k2,i].sum()
borderup[1,i] = k2
borderdown[1,i] = k1
if ifmotion:
apercorr[1,i] = x_aperture_correction(k1,k2,sig1coef,x[i],norder=1,mode='gaussian',
sigma=polyval(sig1coef,x[i]),motion=motion,ifmotion=ifmotion,wheelpos=wheelpos,fixwidth=fixwidth)
# apercorr[1,i] = apercorr_value
else:
apercorr[1,i] = x_aperture_correction(k1,k2,sig1coef,x[i],norder=1,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[1,i] = expmap[0]
else: expospec[1,i] = expmap[k1:k2,i].mean()
if dropout_mask is not None:
at3[i] = dropout_mask[k1:k2,i].any()
if set_qual:
k5 = int(y1[i] - 49 + 0.5)
k6 = k5 + int(98+0.5)   # symmetric 98-pix window centred on the first order track
if ny > 20:
# all zeroth orders of sources within coi-distance:
at1[i] = (map_all[i,k3:k4] == False).any()
if ny > 100:
# strong sources: circle 49 pix radius hits the centre of the track
at2[i] = (map_strong[i,k5:k6] == False).any()
quality[at1] = qflag['weakzeroth']
quality[at2] = qflag['zeroth']
quality[at3] = qflag['bad']
if present2:
for i in range(nx):
sphalfwid = trackwidth * polyval(sig2coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1 changes 19-feb-2012
#splim2 = splim1 + spwid
#k1 = int(splim1+y2[i]-anky+0.5)
k1 = int(y2[i] - sphalfwid +0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y2[i] - coi_half_width + 0.5)
k4 = k1 + int(2*coi_half_width)
if i in q2[0]:
co_second[i] = extimg[k3:k4,i].sum()
sp_second[i] = extimg[k1:k2,i].sum()
bg_second[i] = bgimg[k1:k2,i].sum()
borderup[2,i] = k2
borderdown[2,i] = k1
apercorr[2,i] = x_aperture_correction(k1,k2,sig2coef,x[i],norder=2,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[2,i] = expmap[0]
else: expospec[2,i] = expmap[k1:k2,i].mean()
y1_y2 = np.abs(0.5*(k2+k1) - 0.5*(borderup[1,i]+borderdown[1,i]))  # distance between 2nd and 1st order track centres
s1_s2 = 0.5*(np.polyval(sig1coef,x[i]) + np.polyval(sig2coef, x[i]) )
if ( y1_y2 < s1_s2) : quality[i] += qflag.get('overlap')
if present3:
for i in range(nx):
sphalfwid = trackwidth * polyval(sig3coef,x[i])
spwid = 2*sphalfwid
#splim1 = 100+offset-sphalfwid+1
#splim2 = splim1 + spwid
#k1 = int(splim1+y3[i]-anky+0.5)
k1 = int(y3[i] - sphalfwid +0.5)
k2 = k1 + int(spwid+0.5)
k3 = int(y3[i] - coi_half_width + 0.5)
k4 = k1 + int(2*coi_half_width)
if i in q3[0]:
co_third[i] = extimg[k3:k4,i].sum(axis=0)
sp_third[i] = extimg[k1:k2,i].sum(axis=0)
bg_third[i] = bgimg[k1:k2,i].sum(axis=0)
borderup[3,i] = k2
borderdown[3,i] = k1
apercorr[3,i] = x_aperture_correction(k1,k2,sig3coef,x[i],norder=3,wheelpos=wheelpos,fixwidth=fixwidth)
if len(expmap) == 1: expospec[3,i] = expmap[0]
else: expospec[3,i] = expmap[k1:k2,i].mean()
# y0,y1,y2,y3 now reflect accurately the center of the slit used.
if chatter > 3 : print ("DEBUG 3792 stacking results in structure fitorder")
fitorder = (present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first, co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third, co_third ),(
x,xstart,xend,sp_all,quality,co_back)
#~FIXME:
if outfull:
return fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved
else: return fitorder
#===================
# Now calculate the probability distributions across the orders using gaussian fits
# this section was for development only
if trackfull: #~FIXME: # fit the cross profile with gaussians; return the gaussian fit parameters
if chatter > 3 : print ("DEBUG 3810 full-track update with mfit")
# output parameter gfit:
# define output per x[i]: numpy array gfit.shape= (6,nx) of: (x,order,amplitude,y_pix_position,sig,flags)
gfit = np.zeros( 4*6*nx ).reshape(4,6,nx) -1
#check that y1,y2,y3 are full length arrays
if not ( (len(y1) == nx) & (len(y2) == nx) & (len(y3) == nx) ):
print("FATAL error in uvotgetspec.curved_extraction array sizes wrong")
# this parameter allows you to restrict the range along the dispersion being considered
if (test == None) | (test == 'cal'):
ileft = 2
irite = nx -2
else:
ileft = test[0]
irite = test[1]
for i in range(ileft,irite):
if chatter > 3: print("uvotgetspec.curved_extraction [trackfull] fitting i = %2i x=%6.2f"%(i,x[i]))
# do the zeroth order
if i in q0[0]:
Ypos = (array( [y0[i]])).flatten()
Xpos = arange(i-2,i+3)
sigmas = sig0coef
(par, flag), junk = get_components(Xpos,spimg,Ypos,wheelpos,\
caldefault=caldefault,sigmas=sigmas)
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[0,:,i] = [i,0,par[0],par[1],par[2],iflags]
if chatter > 3: print(i, par, flag)
# do the first order
if ((i in q1[0]) & (i not in q2[0])) :
Ypos = array( [y1[i]] ).flatten()
Xpos = arange(i-2,i+3)
sigmas = sig1coef
(par, flag), junk = get_components(Xpos,spimg,Ypos,wheelpos,\
caldefault=caldefault,sigmas=sigmas)
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if chatter > 3: print(i, par, flag)
# do the second order
if ((i in q1[0]) & (i in q2[0]) & (i not in q3[0])):
Ypos = array( [y1[i],y2[i]]).flatten()
Xpos = arange(i-3,i+4)
sigmas = array([ sig1coef[0], sig2coef[0] ])
if chatter > 3: print('++++ second order Xpos:',Xpos,' Ypos: ', Ypos,' wheelpos ',wheelpos)
Z = get_components(Xpos,spimg,Ypos,wheelpos,composite_fit=composite_fit,\
caldefault=caldefault,sigmas=sigmas)
par, flag = Z[0]
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if len(par) == 6:
gfit[2,:,i] = [i,2,par[3],par[4],par[5],iflags]
if chatter > 3: print(i); print(par[0:3]); print(par[3:6]); print(flag)
# do the third order
if ((i in q1[0]) & (i in q2[0]) & (i in q3[0])):
Ypos = array([y1[i],y2[i],y3[i]]).flatten()
Xpos = arange(i-4,i+5)
sigmas = array([sig1coef[0], sig2coef[0], sig3coef[0]])
if chatter > 3: print('+++++ third order Xpos:',Xpos,' Ypos: ', Ypos,' * * * 3 3 3 3 3 * * *')
width = abs( polyval(array([2.0e-05, 0.034, -70]),(anchor2[1]-1200.)))+5.0 # rough limits
try:
Z = get_components(Xpos,spimg,Ypos,wheelpos,chatter=chatter,width=width,\
composite_fit=composite_fit,caldefault=caldefault,sigmas=sigmas)
par, flag = Z[0]
except:
print("failed 3rd order fitting width = ",width)
print("Ypos = ",Ypos)
print("Xpos range ",i-4,i+5, " sigmas = ",sigmas, " wheelpos = ",wheelpos)
print("composite_fit:",composite_fit," caldefault:",caldefault)
print(par)
print(flag)
par = array([0.,y1[i],3.,0.,y2[i],4.,0.,y3[i],6.])
flag = array([9,9,9,9,9,9])
flags = str(flag[0])+str(flag[1])+str(flag[2])+str(flag[3])+str(flag[4])+str(flag[5])
iflags = int(flags)
gfit[1,:,i] = [i,1,par[0],par[1],par[2],iflags]
if len(par) > 4:
gfit[2,:,i] = [i,2,par[3],par[4],par[5],iflags]
if len(par) == 9:
gfit[3,:,i] = [i,3,par[6],par[7],par[8],iflags]
if chatter > 3:
print(i); print(par[0:3]) ; print(par[3:6]) ; print(par[6:9]) ; print(iflags)
# thing not covered (properly):
# -- the second order falls on the first and the third order not
# -- one of the orders is not on the detector
# -- order overlap
# -- minus one order
return fitorder, gfit, (bgimg,)
def x_aperture_correction(k1,k2,sigcoef,x,norder=None, mode='best', coi=None, wheelpos=None, sigma=3.2,motion=10, tw=2.5, ifmotion=True, fixwidth=False):
'''Returns the aperture correction factor
parameters
----------
k1,k2 : int
k1 edge of track, k2 opposite track edge
in pixel coordinates
sigcoef : list
polynomial coefficient of the fit to the track width
so that sigma = polyval(sigcoef,x)
x : float
pixel/channel position
norder: int
order of the spectrum
mode : 'best'|'gaussian'
'gaussian' option causes first order to be treated as a gaussian PSF
coi : None
not implemented
wheelpos : 160|200|955|1000
filter wheel position
Notes
-----
The aperture correction is returned for given sigcoef and position x
Using the measured cumulative profile normal to the dispersion for the
first order (faint spectrum) or gaussians for orders zero,second, third.
History:
2012-02-20 Split out in preparation of non-gaussian aperture correction factor
2012-10-06 Dependence on coi-factor identified as a likely parameter
changing the PSF (no further action)
2013-12-15 revised aperture functions, one for each grism (low coi)
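Example
-------
Illustrative only (slit edges and pixel position are made up; the width
coefficients are the nominal first-order values used elsewhere in this module)::

  import numpy as np
  sig1coef = np.array([-8.22e-09, 6.773e-04, 3.338])
  k1, k2 = 95, 112                  # extraction slit edges (pixels)
  f = x_aperture_correction(k1, k2, sig1coef, 500., norder=1, wheelpos=160)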
'''
import uvotmisc
import scipy
from scipy.interpolate import interp1d, splev
import numpy as np
apercorr = 1.0
if fixwidth:
apercorr = np.ones(np.shape(apercorr)) #~FIXME: I must remove this line to do apercorr
return apercorr
if norder == 0:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
if norder == 1:
# low coi apertures (normalised to 1 at aperture with half-width 2.5 sigma)
# fitted polynomials to the aperture (low-coi)
#for 0<aperture<6 sig
polycoef160 = np.array([ 1.32112392e-03, -2.69269447e-02, 2.10636905e-01,
-7.89493710e-01, 1.43691688e+00, -2.43239325e-02])
polycoef200 = np.array([ 1.29297314e-03, -2.66018405e-02, 2.10241179e-01,
-7.93941262e-01, 1.44678036e+00, -2.51078365e-02])
#y200 = polyval(polycoef200,x)
polycoef1000a = np.array([ 0.00260494, -0.04792046, 0.33581242, -1.11237223, 1.74086898,
-0.04026319]) # for aperture <= 2.2 sig, and for larger:
polycoef1000b = np.array([ 0.00128903, 0.00107042, 0.98446801])
polycoef955 = np.array([ 0.00213156, -0.03953134, 0.28146284, -0.96044626, 1.58429093,
-0.02412411]) # for aperture < 4 sig
# best curves for the apertures (using aperture.py plots WD1657+343)
aper_160_low = {
# half-width in units of sig
"sig": [0.00,0.30,0.51,0.700,0.90,1.000,1.100,1.200,1.400,
1.600,1.800,2.000,2.20,2.5,2.900,3.31,4.11,6.00],
# aperture correction, normalised
"ape": [0.00,0.30,0.52,0.667,0.77,0.818,0.849,0.872,0.921,
0.947,0.968,0.980,0.99,1.0,1.008,1.01,1.01,1.01]
}
aper_200_low = {
"sig": [0.0,0.300,0.510,0.700,0.800,0.900,1.000,1.10,1.20,
1.40, 1.60, 1.80, 2.0, 2.2, 2.5, 2.7, 3.0,4.0,6.0],
"ape": [0.0,0.308,0.533,0.674,0.742,0.780,0.830,0.86,0.89,
0.929,0.959,0.977,0.986,0.991,1.0,1.002,1.003,1.004,1.005 ]
}
aper_1000_low = {
"sig": [0.0, 0.3, 0.5, 0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2.0,2.2,2.5,3.0 ,4.0 ,6.0 ],
"ape": [0.0,0.37,0.55,0.68,0.74,0.80,0.85,0.91,0.96,0.98,0.995,1. ,1. ,1.004,1.01,1.01]
}
aper_955_med = {
"sig": [0.0,0.30,0.60,0.80,1.00,1.30,1.60,1.80,2.00,2.50,3.00, 4.00,6.00],
"ape": [0.0,0.28,0.47,0.64,0.75,0.86,0.93,0.96,0.97,1.00,1.013,1.02,1.02]
}
aper_1000_med = {
"sig": [0.0,0.30,0.50,0.70,0.80,0.90,1.00,1.20,1.40,1.60,
1.80,2.00,2.20,2.50,3.00,4.00,6.00],
"ape": [0.0,0.34,0.46,0.63,0.68,0.73,0.76,0.87,0.90,0.94,
0.96,0.98,0.99,1.00,1.015,1.027,1.036]
}
renormal = 1.0430 # calibration done with aperture correction 1.043 (sig=2.5)
sig = np.polyval(sigcoef,x) # half width parameter sig in pixels
xx = 0.5*(k2-k1)/sig # half track width in units of sig
if (mode == 'gaussian'):# | (xx > 4.5):
if ifmotion:
apercorr = 1.0/uvotmisc.SmearGaussianHalfIntegralFraction(sigma,motion,tw) #~FIXME:
else:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
elif (wheelpos != None):
# low coi for wheelpos = 160,200; medium coi for wheelpos = 955, 1000
if wheelpos == 160:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf1 = interp1d(aper_160_low['sig'],aper_160_low['ape'],)
apercorr = renormal / apercf1(xx)
if wheelpos == 200:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf2 = interp1d(aper_200_low['sig'],aper_200_low['ape'],)
apercorr = renormal / apercf2(xx)
if wheelpos == 955:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf3 = interp1d(aper_955_med['sig'],aper_955_med['ape'],)
apercorr = renormal / apercf3(xx)
#apercf3 = interp1d([0,6],[0,1],fill_value=(0,1),bounds_error=False)
#apercorr = 1.0/apercf3(xx) # change psf to test if there is apercorr before coi-corr
if wheelpos == 1000:
if (type(coi) == typeNone) or (coi < 0.1) :
apercf4 = interp1d(aper_1000_low['sig'],aper_1000_low['ape'],)
apercorr = renormal / apercf4(xx)
else:
# when xx<4.5, mode !gaussian, wheelpos==None use the following
# 2012-02-21 PSF best fit at 3500 from cal_psf aper05+aper08 valid for 0.5 < xx < 4.5
# the function does not rise as steeply so has more prominent wings
tck = (np.array([ 0. , 0. , 0. , 0. , 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 1. , 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9,
2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3. ,
3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4. , 4.1,
4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 5. , 5. , 5. , 5. ]),
np.array([ -6.45497898e-19, 7.97698047e-02, 1.52208991e-01,
2.56482414e-01, 3.31017197e-01, 4.03222197e-01,
4.72064814e-01, 5.37148347e-01, 5.97906198e-01,
6.53816662e-01, 7.04346413e-01, 7.48964617e-01,
7.87816053e-01, 8.21035507e-01, 8.48805502e-01,
8.71348421e-01, 8.88900296e-01, 9.03143354e-01,
9.16085646e-01, 9.28196443e-01, 9.38406001e-01,
9.45971114e-01, 9.51330905e-01, 9.54947930e-01,
9.57278503e-01, 9.58780477e-01, 9.59911792e-01,
9.60934825e-01, 9.62119406e-01, 9.63707446e-01,
9.66045076e-01, 9.69089467e-01, 9.73684854e-01,
9.75257929e-01, 9.77453939e-01, 9.81061451e-01,
9.80798098e-01, 9.82633805e-01, 9.83725248e-01,
9.84876762e-01, 9.85915295e-01, 9.86929684e-01,
9.87938594e-01, 9.88979493e-01, 9.90084808e-01,
9.91288321e-01, 9.92623448e-01, 9.94123703e-01,
9.96388866e-01, 9.98435907e-01, 1.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]), 3)
apercorr = 1.0/splev( xx, tck,)
if norder == 2:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
if norder == 3:
apercorr = 1.0/uvotmisc.GaussianHalfIntegralFraction( 0.5*(k2-k1)/np.polyval(sigcoef,x) )
return apercorr
def clipmask(f,sigclip=2.5,fpos=False):
'''Provides mask to clip bad data.
Parameters
----------
f : 2D array
kwargs : dict
optional arguments
- **sigclip** : float
clip data at `sigclip` standard deviations from the mean
- **fpos** : bool
if True, clip negative values
Returns
-------
mask : 2D array, boolean
Array of same size as image, true where within sigclip standard
deviations of mean.
Notes
-----
By default infinities are clipped.
The mask is iterated until it converges, so the effect of outliers
on the standard deviation is nil. This also means that sigclip needs
to be chosen large enough or the standard deviation will not be
a good measure of the real noise in the mean.
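Example
-------
A minimal sketch with synthetic data::

  import numpy as np
  img = np.random.normal(10., 2., size=(50,50))
  img[25,25] = 500.                 # one strong outlier
  mask = clipmask(img, sigclip=2.5)
  bg = img[mask].mean()             # mean with the outlier excluded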
'''
import numpy as np
bg = f
if fpos:
mask = (np.isfinite(f) & (f >= 0.))
else:
mask = np.isfinite(f)
m0 = len(np.where(mask)[0])
n = 50
bad = True
while (bad & (n > 0)):
n -= 1
mask = abs(f - f[mask].mean()) < sigclip * f[mask].std()
m = len(np.where(mask)[0])
if m == m0: bad = False
else: m0 = m
return mask
def get_components(xpos,ori_img,Ypositions,wheelpos,chatter=0,caldefault=False,\
sigmas=None,noiselevel=None,width=40.0,composite_fit=True, fiterrors = True, \
smoothpix=1, amp2lim=None,fixsig=False,fixpos=False):
''' extract the spectral components for an image slice
at position(s) xpos (dispersion axis) using the Ypositions
of the orders. The value of Ypositions[0] should be the main peak.
Notes: implicit assumption is that the 'y' axis is the pixel number.
if for some reason the data pairs are (z_i,f_meas_i) then the definition of y
changes into z.
if the return value for the centre of the gaussian exceeds some number (sig?),
then the solution is probably suspect. In that case a second fit with sig? held
fixed perhaps should be done.
some tests show that the solution is very sensitive to the first guess of the
position of the peak. It will even find a dip in the noise (neg amplitude)
rather than the main peak or overshoot the peak if the starting guess is too far
off, and fudge sigma to be large.
Error Flag:
flag[0] 0 = ok, 1=solution main peak is offset from Ypositions by more than 'sig' pixels
flag[1] 0 = ok, 1=solution secondary peak is offset from Ypositions by more than 'sig' pixels
flag[2] 0 = ok, 1=solution third peak is offset from Ypositions by more than 'sig' pixels
flag[3] not used
flag[4] number of orders in answer
flag[5] error flag returned by fitting program
noiselevel:
if the fit to the peak has a maximum < noiselevel then the peak will be removed.
fiterrors True implies caldefault=True
smoothpix: the number of pixels along dispersion to smooth over for
fitting gaussians across dispersion
amp2lim: second order prediction of a (minimum, maximum) valid for all xpos
NPMK, 2010-07-15 Fecit
NPMK, 2011-08-16 adding smoothing for improved fitting
NPMK 2011-08-26 replace leastsq with mpfit based routines; clip image outside spectrum width
'''
import numpy
from numpy import array, arange,transpose, where, abs, min, zeros, atleast_1d, atleast_2d, sqrt
try:
from convolve import boxcar
except:
from stsci.convolve import boxcar
xpos = atleast_1d(xpos)
ori_img = atleast_2d(ori_img)
Ypositions = atleast_1d(Ypositions)
xpos = xpos.flatten()
Ypositions = Ypositions.flatten()
nypos = len(Ypositions)
smoothpix = int(smoothpix)
if smoothpix > 1:
spimg = boxcar(ori_img.copy(),(smoothpix,),mode='reflect')
else: spimg = ori_img
if type(sigmas) == typeNone:
sigmas = array([3.1,4.3,4.6])
if chatter > 4:
print("get_components: input prameter wheelpos ", wheelpos)
print("get_components: input parameter xpos ", xpos)
print("get_components: input parameter Ypositions ", Ypositions)
print("get_components: number of orders : ",nypos)
print("get_components: dimension input image ", spimg.shape)
xpos = xpos[ where(xpos < spimg.shape[1])[0] ] # eliminate elements outside range
if len(xpos) <1:
print("get_components: xpos must be at least one number")
raise ValueError
return
elif len(xpos) == 1:
f_meas = spimg[:,xpos]
f_ori = ori_img[:,xpos]
else:
f_meas = spimg[:,xpos].mean(axis=1)
f_ori = ori_img[:,xpos].mean(axis=1)
f_meas = f_meas.flatten()
f_ori = f_ori.flatten()
f_pos = f_meas >= 0
f_err = 9.99e+9 * numpy.ones(len(f_meas))
f_err[f_pos] = 1.4*sqrt(f_meas[f_pos])
bg_mask = clipmask( f_meas, fpos=True)
f_mask = bg_mask
bg = f_meas[bg_mask].mean()
if type(noiselevel) == typeNone:
noiselevel = f_meas[bg_mask].mean()
if chatter > 3: print("get_components: adopted noiselevel = ", noiselevel)
y = arange(spimg.shape[0],dtype=float) # pixel number
flag = zeros(6, dtype=int )
if caldefault:
if type(sigmas) == typeNone:
print("missing parameter fitorder in uvotgetspec.get_components\n")
else:
# the positions of the centre of the fits are given in Ypositions
sigmaas = atleast_1d(sigmas)
if nypos == 1:
if chatter > 3: print('len Ypositions == 1')
sig0 = sigmaas[0]
p0 = Ypositions[0]
a0 = max(f_meas)
                f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True  # int(): slice indices must be integers
Z = runfit1(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,\
fixsig=fixsig,fixpos=fixpos)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0] = Z.params
else:
if chatter > 4:
print("runfit1 status:",Z.status)
print("runfit1 params:",Z.params)
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0),flag), (y,f_meas)
if nypos == 2:
if chatter > 3: print('len Ypositions == 2')
sig0, sig1 = sigmaas[0], sigmaas[1]
p0, p1 = Ypositions
a0 = 0.9 * max(f_meas)
a1 = 0.5*a0
                f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True
                f_mask[int(p1-4*sig1):int(p1+4*sig1)] = True
Z = runfit2(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,a1,p1,sig1,\
fixsig=fixsig,fixpos=fixpos,amp2lim=amp2lim)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0,a1,p1,sig1] = Z.params
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0,a1,p1,sig1),flag), (y,f_meas)
if nypos == 3:
if chatter > 3: print('len Ypositions == 3')
sig0,sig1,sig2 = sigmaas[:]
p0, p1, p2 = Ypositions
a0 = 0.9* max(f_meas)
a1 = a0
a2 = a1
                f_mask[int(p0-4*sig0):int(p0+4*sig0)] = True
                f_mask[int(p2-4*sig2):int(p2+4*sig2)] = True
Z = runfit3(y[f_mask],f_meas[f_mask],f_err[f_mask],bg,a0,p0,sig0,a1,p1,sig1,a2,p2,sig2,\
fixsig=fixsig,fixpos=fixpos,amp2lim=amp2lim)
flag[5] = Z.status
if Z.status > 0:
[bg0,bg1,a0,p0,sig0,a1,p1,sig1,a2,p2,sig2] = Z.params
if fiterrors: return (Z.params,Z.perror,flag), (y,f_meas) # errors in fit = Z.perror
else: return ((a0,p0,sig0,a1,p1,sig1,a2,p2,sig2),flag), (y,f_meas)
if wheelpos < 500 :
sig = 6
else:
sig = 4
sig0 = sig
Sig = sig
# width = 40 Maximum order distance - parameter in call ?
# start with fitting using a fixed sig
# to get the peaks fixed do them one by one
if len(Ypositions) < 4 :
# FIT ONE PEAK for all observations
# first guess single gaussian fit parameters
a0 = f_meas.max()
y0 = Ypositions[0]
(p0_,p1), ier = leastsq(Fun1b, (a0,y0), args=(f_meas,y,sig) )
# if the "solution" is wrong use the input as best guess:
if abs(Ypositions[0] - p1) > 15:
p1 = y0
flag[0] = 3
else: # shift the input positions
delpos = p1-Ypositions[0]
Ypositions += delpos
# refine the sigma with fixed centre for the peak
(p0,sig_), ier = leastsq(Fun1a, (p0_,sig), args=(f_meas,y,p1) )
if ((sig_ > 0.1*sig) & (sig_ < 6.* sig)):
sig1 = sig_
else: sig1 = sig
Yout = ((p0,p1,sig1), flag), (y,f_meas)
if chatter > 3:
print("highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f, ier flag=%2i "%(p0,p1,sig1,ier))
else:
print('Error in number of orders given in Ypositions')
return
    # limit acceptable range for searching for maxima
q = where( (y < p1+width) & (y > p1-0.5*width) ) # if direction known, one can be set to 3*sig
yq = y[q[0]]
qok = len(q[0]) > 0
if ( (len(Ypositions) > 1) & qok ):
# TWO PEAKS
# double gaussian fit: remove the first peak from the data and fit the residual
f_meas_reduced = f_meas[q] - singlegaussian(yq, p0, p1, sig_)
a0 = f_meas_reduced.max()
y0 = where(f_meas_reduced == a0)[0][0]
Y2 = (p2,p3) , ier = leastsq(Fun1b, (a0,y0) , args=(f_meas_reduced,yq,sig))
if chatter > 3:
print('position order 2: %8.1f shifted to %8.1f'%(p3,p3+y[q][0]))
p3 += y[q][0]
# check that the refined value is not too far off:
if abs(p3 - Ypositions[1]) > 15:
if chatter > 3: print("problem p3 way off p3=",p3)
p3 = Ypositions[1]
flag[1] = 3
Y2 = (p2,sig2), ier = leastsq(Fun1a, (p2,sig1), args=(f_meas_reduced,yq,p3 ))
if not ((sig2 > 0.25*sig1) & (sig2 < 4.* sig1)):
sig2 = sig1
newsig2 = False
else:
# keep sig2
newsig2 = True
if chatter > 3:
print("second highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f ; ier flag=%2i "%(p2,p3,sig2, ier))
Yout = ((p0,p1,sig1,p2,p3,sig2),flag), (y,q,f_meas,f_meas_reduced)
if ((len(Ypositions) > 2) & qok ):
# triple gaussian fit: removed the second peak from the data
(p0,p1,sig1,p2,p3,sig2), ier = \
leastsq(Fun2, (p0,p1,sig1,p2,p3,sig2) , args=(f_meas[q],y[q]))
if chatter > 3:
print("fit double gaussian (%8.2f,%8.2f,%8.2f, %8.2f,%8.2f,%8.2f)"%\
(p0,p1,sig1,p2,p3,sig2))
f_meas_reduced = f_meas[q] - doublegaussian(yq,p0,p1,sig1,p2,p3,sig2)
if not newsig2:
y0 = Ypositions[2]
a0 = 10*noiselevel
else:
a0 = f_meas_reduced.max()
y0 = y[q][where(f_meas_reduced == a0)[0][0]]
if chatter > 3: print("third order input fit: amplitude = %8.2f, position = %8.2f"%(a0,y0))
sig3 = 2*sig2
Y3 = (p4,p5), ier = leastsq(Fun1b, (a0,y0) , args=(f_meas_reduced,y[q],sig3))
p5 += y[q][0]
if abs(p5-Ypositions[2]) > 15:
p5 = Ypositions[2]
flag[2] = 3
Y3 = (p4a,sig3), ier = leastsq(Fun1a, (p4,sig3), args=(f_meas_reduced,y[q],p5 ))
if sig3 > 6*sig: sig3 = 2*sig2
if chatter > 3:
print("third highest peak amplitude=%8.1f, position=%8.1f, sigma=%8.2f, ier flag =%i "\
%(p4,p5,sig3,ier))
Yout = ((p0,p1,sig1,p2,p3,sig2,p4,p5,sig),flag),(y,q,f_meas,f_meas_reduced)
# now remove odd solutions - TBD: just flagging now
# check that the solutions for the centre are within 'Sig' of the input 'Ypositions'
if chatter > 2:
print("input Ypositions: ", Ypositions)
nposi = len(Ypositions)
if len(Ypositions) < 4 :
dy = min(abs(p1 - Ypositions))
if dy > Sig: flag[0] += 1
if ((len(Ypositions) > 1) & ( len(q[0]) > 0 )):
dy = min(abs(p3 - Ypositions))
if dy > Sig: flag[1] += 1
dy = abs(p3 - p1)
if dy < sig:
flag[1] += 10
ip = where(abs(p3-Ypositions) < 0.9*dy)[0]
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if p2 < noiselevel:
flag[1] += 20
ip = where(abs(p3-Ypositions) < 0.9*dy)[0]
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx = list(range(len(Ypositions)))
#return (p0,p1,p2,p3), Ypositions, ip, noiselevel,dy
                indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if ((len(Ypositions) > 2) & qok):
dy = min(abs(p5 - Ypositions))
if dy > Sig: flag[2] += 1
dy = abs(p5 - p1)
if dy < sig:
flag[2] += 10
ip = where(abs(p5-Ypositions) < 0.2*dy)[0]
indx = list(range(len(Ypositions)))
            if len(ip) == 0:
                print("problem with fitting peak # 3 ")
            else:
                indx.pop(ip[-1])
                Ypositions = Ypositions[indx]
if p4 < noiselevel:
flag[2] += 20
ip = where(abs(p5-Ypositions) < 0.9*dy)[0]
if chatter > 2: print('ip = ',ip)
indx = list(range(len(Ypositions)))
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if flag[1] != 10:
dy = abs(p5 - p3)
if dy < sig:
flag[2] += 100
ip = where(abs(p5-Ypositions) < 0.9*dy)[0]
if len(ip) == 0:
print("problem with fitting peak # 2 ")
else:
indx = list(range(len(Ypositions)))
indx.pop(ip[-1])
Ypositions = Ypositions[indx]
if chatter > 2:
print("flag: ",flag)
print(" initial fit parameters: \n first peak:", p0, p1, sig1)
if nposi > 1: print(" second peak:", p2,p3, sig2)
if nposi > 2: print(" third peak:", p4,p5, sig3)
print(" intermediate Ypositions: ", Ypositions)
if not composite_fit: # bail out at this point
if len(Ypositions) == 1:
Y1 = ((p0,p1,sig), flag), 0
elif len(Ypositions) == 2:
Y1 = ((p0,p1,sig,p2,p3,sig2), flag), 0
elif len(Ypositions) == 3:
Y1 = ((p0,p1,sig,p2,p3,sig2,p4,p5,sig), flag), 0
else:
Y1 = Yout
return Y1
# free sig and refit
if ( len(Ypositions) == 1) :
# first guess single gaussian fit parameters in range given by width parameter
a0 = p0
y0 = p1
if chatter > 3:
print("f_meas :", transpose(f_meas))
print("a0: %8.2f \ny0: %8.2f \nsig0 : %8.2f "%(a0,y0,sig))
print(q)
params_fit, ier = leastsq(Fun1, (a0,y0,sig), args=(f_meas[q],y[q]) )
flag[5] = 1
flag[4] = ier
# remove odd solutions
return (params_fit, flag), (f_meas, y)
elif (qok & (len(Ypositions) == 2) ):
# double gaussian fit
a0 = p0
y0 = p1
a1 = p2
y1 = p3
Y0 = params_fit, ier = leastsq(Fun2, (a0,y0,sig,a1,y1,sig) , args=(f_meas[q],y[q]))
flag[5]=2
flag[4]=ier
# remove odd solutions - TBD
return (params_fit, flag), (f_meas, y, f_meas_reduced, q)
elif (qok & (len(Ypositions) == 3)):
# restricting the fitting to a smaller region around the peaks to
# fit will reduce the effect of broadening the fit due to noise.
q = where( (y > p1-3.*sig1) & (y < p3+3*sig3) )
# ====
# triple gaussian fit
a0 = p0
y0 = p1
a1 = p2
y1 = p3
a2 = p4
y2 = p5
Y0 = params_fit, ier = leastsq(Fun3, (a0,y0,sig1,a1,y1,sig2,a2,y2,sig3) , args=(f_meas[q],y[q]))
flag[5] = 3 # number of peaks
flag[4] = ier
# remove odd solutions
return (params_fit, flag), (f_meas, y, f_meas_reduced, q)
else:
# error in call
print("Error in get_components Ypositions not 1,2,or 3")
return Yout
def obsid2motion(obsid, file_path):
    '''By Zexi:
    obtain the motion (in pixels) for a given OBS_ID from a pre-created motion table.
    '''
import pandas as pd
data=pd.read_csv(file_path,sep=' ',header=0)
data['OBS_ID']=data['OBS_ID'].astype(str)
data['OBS_ID']='000'+data['OBS_ID']
d = data.set_index(['OBS_ID'])
motion_v = d.loc[obsid]['MOTION_V']
motion_p = d.loc[obsid]['MOTION_P']
    motion = {'V': motion_v, 'P': motion_p}
    return motion
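# Illustrative note (not from the original module): the motion table read by
# obsid2motion is assumed to be a space-separated file with columns OBS_ID,
# MOTION_V and MOTION_P; the OBS_ID column is prefixed with '000' before the
# lookup, so the obsid argument should be the full identifier. The path and
# obsid below are hypothetical examples:
#
#     motion = obsid2motion('00012345678', '/path/to/motion_table.txt')
#     motion_v, motion_p = motion['V'], motion['P']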
def Fun1(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1a(p,y,x,x0):
'''compute the residuals for gaussian fit with fixed centre in get_components '''
a0, sig0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1b(p,y,x,sig0):
'''compute the residuals for gaussian fit with fixed width in get_components '''
a0, x0 = p
return y - singlegaussian(x,a0,x0,sig0)
def Fun1c(p,y,x,x0,sig0):
'''compute the residuals for gaussian fit with fixed centre and width in get_components '''
a0 = p
return y - singlegaussian(x,a0,x0,sig0)
def DFun1(p,y,x):
    '''There is something wrong with the return value: it should probably be a matrix of partial derivatives. '''
a0, x0, sig0 = p
return -Dsinglegaussian(x,a0,x0,sig0)
def Fun2(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 ,a1,x1,sig1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun2b(p,y,x,sig):
'''compute the residuals for gaussian fit in get_components for fixed sig '''
a0, x0, a1,x1 = p
return y - doublegaussian(x,a0,x0,sig,a1,x1,sig)
def Fun2bb(p,y,x,sig1,sig2):
'''compute the residuals for gaussian fit in get_components for fixed sig1, and sig2 '''
a0, x0, a1,x1 = p
return y - doublegaussian(x,a0,x0,sig1,a1,x1,sig2)
def Fun2bc(p,y,x,x0,x1):
'''compute the residuals for gaussian fit in get_components for fixed centre x0, x1 '''
a0, sig0, a1,sig1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun2c(p,y,x,x0,sig0,x1,sig1):
'''compute the residuals for gaussian fit in get_components for fixed centre x_i and width sig_i '''
a0, a1 = p
return y - doublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def DFun2(p,y,x):
a0, x0, sig0,a1,x1,sig1 = p
return -Ddoublegaussian(x,a0,x0,sig0,a1,x1,sig1)
def Fun3(p,y,x):
'''compute the residuals for gaussian fit in get_components '''
a0, x0, sig0 ,a1,x1,sig1 ,a2,x2,sig2= p
return y - trigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def Fun3b(p,y,x,sig):
'''compute the residuals for gaussian fit in get_components '''
a0,x0,a1,x1,a2,x2 = p
return y - trigaussian(x,a0,x0,sig,a1,x1,sig,a2,x2,sig)
def Fun3bb(p,y,x,sig1,sig2,sig3):
'''compute the residuals for gaussian fit in get_components '''
a0,x0,a1,x1,a2,x2 = p
return y - trigaussian(x,a0,x0,sig1,a1,x1,sig2,a2,x2,sig3)
def Fun3c(p,y,x,x0,sig0,x1,sig1,x2,sig2):
'''compute the residuals for gaussian fit in get_components for fixed centre x_i and width sig_i '''
a0, a1, a2 = p
return y - trigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def DFun3(p,y,x):
a0, x0, sig0,a1,x1,sig1,a2,x2,sig2 = p
return -Dtrigaussian(x,a0,x0,sig0,a1,x1,sig1,a2,x2,sig2)
def Fun4(p,y,x,motion0):
a0, x0, sig0 = p
return y - smeargaussian(x,a0,x0,sig0,motion0)
def singlegaussian(x, a0, x0, sig0 ):
'''
The function returns the gaussian function
on array x centred on x0 with width sig0
and amplitude a0
'''
x = np.atleast_1d(x)
f = 0. * x.copy()
q = np.where( np.abs(x-x0) < 4.*sig0 )
f[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
return f
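# Illustrative sketch (not part of the original module): fit a synthetic
# cross-dispersion profile with scipy's leastsq and the Fun1 residual function
# defined above. The amplitude, centre, width and starting guesses are
# arbitrary assumptions used only for demonstration.
def _singlegaussian_fit_demo():
    import numpy as np
    from scipy.optimize import leastsq
    y = np.arange(100, dtype=float)              # pixel positions across the slit
    f_meas = singlegaussian(y, 10.0, 42.0, 4.0)  # noise-free synthetic peak
    (a_fit, x0_fit, sig_fit), ier = leastsq(Fun1, (8.0, 40.0, 3.0), args=(f_meas, y))
    return a_fit, x0_fit, sig_fit                # should recover approximately (10, 42, 4)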
def Dsinglegaussian(x, a0, x0, sig0):
'''partial derivative of singlegaussian to all parameters'''
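    # Added note: for f = a0*exp(-((x-x0)/sig0)**2) the analytic partials are
    #   df/da0   = f/a0
    #   df/dx0   = 2*(x-x0)*f/sig0**2
    #   df/dsig0 = 2*(x-x0)**2*f/sig0**3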
f = singlegaussian(x, a0, x0, sig0)
dfda0 = f/a0
    dfdx0 = 2*(x-x0)*f/sig0**2
dfdsig0 = 2*f*(x-x0)**2/sig0**3
return dfda0, dfdx0, dfdsig0
def doublegaussian(x, a0, x0, sig0, a1, x1, sig1 ):
'''
The function returns the double gaussian function
on array x centred on x0 and x1 with width sig0 and sig1
and amplitude a0, and a1
'''
x = np.atleast_1d(x)
f1 = 0. * x.copy()
f2 = 0. * x.copy()
q = np.where( np.abs(x-x0) < 4.*sig0 )
    f1[q] = a0 * np.exp( - ((x[q]-x0)/sig0)**2 )
    q = np.where( np.abs(x-x1) < 4.*sig1 )
    f2[q] = a1 * np.exp( - ((x[q]-x1)/sig1)**2 )
    f = f1 + f2
    return f
import numpy as np
import matplotlib.pyplot as plt
import os
from utils.plotting_utils import plot_trajectories, plot_maze, plot_observations, plot_trajectory
def wrap_angle(angle):
return ((angle - np.pi) % (2 * np.pi)) - np.pi
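# e.g. wrap_angle(3*np.pi/2) returns -np.pi/2; angles are mapped into [-pi, pi).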
def mix_data(file_in1, file_in2, file_out, steps_per_episode=100, num_episodes=1000):
data1 = dict(np.load(file_in1))
data2 = dict(np.load(file_in2))
data_mix = dict()
for key in data1.keys():
d1 = data1[key][:steps_per_episode*num_episodes//2]
d2 = data2[key][:steps_per_episode*num_episodes//2]
data_mix[key] = np.concatenate((d1, d2), axis=0)
    np.savez(file_out, **data_mix)
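# Illustrative usage sketch (the file names below are hypothetical assumptions):
# mix_data expects two .npz archives with identical keys and writes a new
# archive that concatenates the first half of the requested episodes from each:
#
#     mix_data('nav01_train_A.npz', 'nav01_train_B.npz', 'nav01_train_mix.npz',
#              steps_per_episode=100, num_episodes=1000)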
"""
The pycity_scheduling framework
Copyright (C) 2022,
Institute for Automation of Complex Power Systems (ACS),
E.ON Energy Research Center (E.ON ERC),
RWTH Aachen University
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import unittest
import datetime
import logging
import warnings
import pyomo.environ as pyomo
from pyomo.opt import TerminationCondition
from shapely.geometry import Point
from pycity_scheduling import constants, solvers
from pycity_scheduling.classes import *
from pycity_scheduling.util.metric import *
class TestModule(unittest.TestCase):
def test_filter_entities(self):
e = get_env(4, 8)
bd = Building(e)
bes = BuildingEnergySystem(e)
pv = Photovoltaic(e, 0)
bes.addDevice(pv)
bd.addEntity(bes)
def do_test(gen):
entities = list(gen)
self.assertEqual(1, len(entities))
self.assertIn(pv, entities)
do_test(filter_entities(bd.get_entities(), 'PV'))
do_test(filter_entities(bd, 'generation_devices'))
do_test(filter_entities(bd, [Photovoltaic]))
do_test(filter_entities(bd, ['PV']))
do_test(filter_entities(bd, {'PV': Photovoltaic}))
with self.assertRaises(ValueError):
next(filter_entities(bd, 'PPV'))
with self.assertRaises(ValueError):
next(filter_entities(bd, [int]))
with self.assertRaises(ValueError):
next(filter_entities(bd, None))
return
class TestBattery(unittest.TestCase):
def setUp(self):
e = get_env(3)
self.bat = Battery(e, 10, 20, soc_init=0.875, eta=0.5)
return
def test_populate_model(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
model.c1 = pyomo.Constraint(expr=self.bat.model.e_el_vars[2] == 10)
model.c2 = pyomo.Constraint(expr=self.bat.model.e_el_vars[0] == 5)
obj = pyomo.sum_product(self.bat.model.p_el_demand_vars, self.bat.model.p_el_demand_vars)
model.o = pyomo.Objective(expr=obj)
result = solve_model(model)
# TODO stats are currently not correct due to a pyomo bug
# use result as a workaround
#model.compute_statistics()
#stats = model.statistics
#self.assertEqual(12, stats.number_of_variables)
self.assertEqual(13, result.Problem[0].number_of_variables)
var_sum = pyomo.value(pyomo.quicksum(self.bat.model.p_el_vars[t] for t in range(1, 3)))
self.assertAlmostEqual(40, var_sum, places=5)
var_sum = pyomo.value(pyomo.quicksum(
self.bat.model.p_el_supply_vars[t] + self.bat.model.p_el_demand_vars[t] for t in range(1, 3)
))
self.assertAlmostEqual(40, var_sum, places=5)
return
def test_update_model(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
demand_var = self.bat.model.p_el_vars
self.bat.update_model()
model.c1 = pyomo.Constraint(expr=self.bat.model.e_el_vars[0] == 10)
obj = pyomo.sum_product(demand_var, demand_var)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
self.assertAlmostEqual(10, pyomo.value(demand_var[0]), places=5)
return
def test_update_schedule(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
self.bat.update_model()
self.bat.model.p_el_demand_vars.setlb(3.0)
self.bat.model.p_el_demand_vars.setub(3.0)
self.bat.model.p_el_supply_vars.setlb(0.0)
self.bat.model.p_el_supply_vars.setub(0.0)
obj = pyomo.sum_product(self.bat.model.p_el_demand_vars, self.bat.model.p_el_demand_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
self.bat.update_schedule()
assert_equal_array(self.bat.p_el_schedule, [3] * 3)
assert_equal_array(self.bat.e_el_schedule, 0.875 * 10 + np.arange(1, 4)*3*0.25*0.5)
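        # Added note: the expected profile follows the storage balance
        # e[t] = soc_init*capacity + sum_{k<=t} p_demand[k]*dt*eta
        #      = 0.875*10 + (t+1)*3*0.25*0.5, assuming a 15-minute time slot (dt = 0.25 h).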
return
def test_calculate_co2(self):
self.bat.p_el_schedule = np.array([10]*3)
self.assertEqual(0, calculate_co2(self.bat))
return
def test_get_objective(self):
model = pyomo.ConcreteModel()
self.bat.populate_model(model)
obj = self.bat.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
for t in range(3):
self.assertIn(self.bat.model.p_el_vars[t], vs)
self.bat.model.p_el_vars[t] = t * 5
self.assertEqual(3, len(vs))
self.assertEqual(sum(2*(5*t)**2 for t in range(3)), pyomo.value(obj))
return
def test_e_ini(self):
expected_schedule = list(range(4, 21, 2))
e = get_env(3, 9, 2)
model = pyomo.ConcreteModel()
bat = Battery(e, 20, 10, soc_init=0.1, eta=0.8)
bat.populate_model(model)
model.o = pyomo.Objective(expr=-bat.model.e_el_vars[2])
for t in range(4):
bat.update_model()
solve_model(model)
bat.update_schedule()
e.timer.mpc_update()
assert_equal_array(bat.e_el_schedule, expected_schedule[:3+t*2] + [0] * 2 * (3-t))
assert_equal_array(bat.p_el_schedule, [10] * (3 + t * 2) + [0] * 2 * (3 - t))
assert_equal_array(bat.p_el_demand_schedule, [10] * (3 + t * 2) + [0] * 2 * (3 - t))
assert_equal_array(bat.p_el_supply_schedule, [0] * 9)
return
def test_no_discharge(self):
e = get_env(9, 9)
model = pyomo.ConcreteModel()
bat = Battery(e, 30, 10, p_el_max_discharge=0, soc_init=0.5, eta=1)
bat.populate_model(model)
bat.update_model()
model.o = pyomo.Objective(expr=pyomo.sum_product(bat.model.p_el_vars))
solve_model(model)
bat.update_schedule()
assert_equal_array(bat.e_el_schedule, [15] * 9)
assert_equal_array(bat.p_el_schedule, [0] * 9)
assert_equal_array(bat.p_el_demand_schedule, [0] * 9)
assert_equal_array(bat.p_el_supply_schedule, [0] * 9)
return
class TestBoiler(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.bl = Boiler(e, 10, 0.4)
return
def test_calculate_co2(self):
self.bl.p_th_heat_schedule = - np.array([10] * 8)
self.bl.p_th_heat_ref_schedule = - np.array([4] * 8)
co2_em = np.array([1111]*8)
co2 = calculate_co2(self.bl, co2_emissions=co2_em)
self.assertEqual(50.0*constants.CO2_EMISSIONS_GAS, co2)
co2 = calculate_co2(self.bl, timestep=4, co2_emissions=co2_em)
self.assertEqual(25.0*constants.CO2_EMISSIONS_GAS, co2)
self.bl.load_schedule("ref")
co2 = calculate_co2(self.bl, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_GAS, co2)
return
def test_lower_activation(self):
e = get_env(4, 8)
bl = Boiler(e, 10, lower_activation_limit=0.5)
model = pyomo.ConcreteModel()
bl.populate_model(model, "integer")
bl.update_model("integer")
model.o = pyomo.Objective(expr=bl.model.p_th_heat_vars[0])
results = solve_model(model)
self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition)
bl.model.p_th_heat_vars[0].setub(-0.1)
bl.model.p_th_heat_vars[0].setlb(-4.9)
logger = logging.getLogger("pyomo.core")
oldlevel = logger.level
logger.setLevel(logging.ERROR)
results = solve_model(model)
logger.setLevel(oldlevel)
self.assertEqual(TerminationCondition.infeasible, results.solver.termination_condition)
return
def test_objective(self):
model = pyomo.ConcreteModel()
self.bl.populate_model(model)
self.bl.get_objective()
return
class TestBuilding(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.bd = Building(e)
return
def test_get_objective(self):
model = pyomo.ConcreteModel()
env = self.bd.environment
env.prices.tou_prices[:4] = [1, 2, 3, 4]
env.prices.co2_prices[:4] = [5, 4, 3, 2]
bes = BuildingEnergySystem(env)
self.bd.addEntity(bes)
self.bd.populate_model(model)
obj = self.bd.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(4, len(vs))
for t in range(4):
self.bd.model.p_el_vars[t].value = 10**t
self.assertAlmostEqual(2*4321/10*4, pyomo.value(obj), places=5)
model = pyomo.ConcreteModel()
bd2 = Building(env, 'co2')
bd2.addEntity(bes)
bd2.populate_model(model)
obj = bd2.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(4, len(vs))
for t in range(4):
bd2.model.p_el_vars[t].value = 10**t
# numerical errors caused by /14 and co2_prices being np.float32
self.assertAlmostEqual(2*2345/14*4, pyomo.value(obj), places=3)
model = pyomo.ConcreteModel()
bd3 = Building(env, 'peak-shaving')
bd3.addEntity(bes)
bd3.populate_model(model)
obj = bd3.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(4, len(vs))
for t in range(4):
bd3.model.p_el_vars[t].value = 10**t
self.assertEqual(2*1010101, pyomo.value(obj))
model = pyomo.ConcreteModel()
bd4 = Building(env, None)
bd4.addEntity(bes)
bd4.populate_model(model)
obj = bd4.get_objective(2)
vs = list(pyomo.current.identify_variables(obj))
self.assertEqual(0, len(vs))
for t in range(4):
bd4.model.p_el_vars[t].value = 10 ** t
self.assertEqual(0, pyomo.value(obj))
bd5 = Building(env, "invalid")
self.assertRaisesRegex(ValueError, ".*Building.*", bd5.get_objective)
return
def test_calculate_co2(self):
bes = BuildingEnergySystem(self.bd.environment)
pv = Photovoltaic(self.bd.environment, 0)
bes.addDevice(pv)
self.bd.addEntity(bes)
self.bd.p_el_schedule = np.array([-5] * 2 + [5] * 4 + [-5] * 2)
self.bd.p_el_ref_schedule = np.array([-2] * 2 + [2] * 4 + [-2] * 2)
pv.p_el_schedule = - np.array([10]*8)
pv.p_el_ref_schedule = - np.array([4]*8)
co2_em = np.array([100]*4 + [400]*4)
co2 = calculate_co2(self.bd, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_PV+1250.0, co2)
co2 = calculate_co2(self.bd, timestep=4, co2_emissions=co2_em)
self.assertEqual(10.0*constants.CO2_EMISSIONS_PV+250.0, co2)
self.bd.load_schedule("ref")
co2 = calculate_co2(self.bd, co2_emissions=co2_em)
self.assertEqual(8.0*constants.CO2_EMISSIONS_PV+500.0, co2)
return
def test_robustness(self):
model = pyomo.ConcreteModel()
env = self.bd.environment
bes = BuildingEnergySystem(env)
self.bd.addEntity(bes)
ths1 = ThermalHeatingStorage(env, 10)
bes.addDevice(ths1)
ths2 = ThermalHeatingStorage(env, 25)
bes.addDevice(ths2)
ap = Apartment(env)
self.bd.addEntity(ap)
loadcurve = np.array([15, 15, 10, 10])
sh = SpaceHeating(env, loadcurve=loadcurve)
ap.addEntity(sh)
eh = ElectricalHeater(env, 20)
bes.addDevice(eh)
self.bd.populate_model(model, robustness=(3, 0.5))
self.bd.update_model(robustness=(3, 0.5))
assert_equal_array(np.array([self.bd.model.lower_robustness_bounds[i].value for i in range(3)]),
np.cumsum(loadcurve[:3])*0.5/4)
assert_equal_array(np.array([self.bd.model.upper_robustness_bounds[i].value for i in range(3)]),
35 - np.cumsum(loadcurve[:3]) * 0.5 / 4)
self.assertEqual(17.5, self.bd.model.lower_robustness_bounds[3].value)
self.assertEqual(17.5, self.bd.model.upper_robustness_bounds[3].value)
return
def testReset(self):
env = self.bd.environment
bes = BuildingEnergySystem(env)
self.bd.addEntity(bes)
schedules = list(self.bd.schedules.keys())
model = pyomo.ConcreteModel()
self.bd.populate_model(model)
self.bd.update_model()
model.o = pyomo.Objective(expr=pyomo.sum_product(self.bd.model.p_el_vars))
solve_model(model)
self.assertEqual(schedules, list(self.bd.schedules.keys()))
self.bd.update_schedule()
self.assertEqual(schedules, list(self.bd.schedules.keys()))
self.bd.schedules["ref"]["p_el"] = np.arange(8)
self.bd.copy_schedule("new", "ref")
schedules.append("new")
self.bd.reset("ref")
for k in schedules:
if k == "new":
e = np.arange(8)
else:
e = np.zeros(8)
assert_equal_array(self.bd.schedules[k]["p_el"], e)
self.bd.reset()
for k in schedules:
assert_equal_array(self.bd.schedules[k]["p_el"], np.zeros(8))
self.assertEqual(schedules, list(self.bd.schedules.keys()))
with self.assertRaises(KeyError):
self.bd.load_schedule("nonexistent")
self.bd.p_el_schedule
with self.assertRaises(KeyError):
self.bd.load_schedule(None)
self.bd.p_el_schedule
return
class TestChiller(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.ch = Chiller(e, 10, cop=np.full(8, 11))
return
def test_update_model(self):
m = pyomo.ConcreteModel()
self.ch.populate_model(m)
self.ch.update_model()
c = self.ch.model.p_coupl_constr[0]
f, l = pyomo.current.decompose_term(c.body)
self.assertTrue(f)
for coeff, value in l:
if value is self.ch.model.p_el_vars[0]:
self.assertEqual(11, coeff)
if value is self.ch.model.p_th_cool_vars[0]:
self.assertEqual(1, coeff)
if value is None:
self.assertEqual(0, coeff)
return
def test_lower_activation(self):
e = get_env(4, 8)
ch = Chiller(e, 10, cop=np.full(8, 11), lower_activation_limit=0.5)
m = pyomo.ConcreteModel()
ch.populate_model(m, "integer")
ch.update_model("integer")
obj = pyomo.sum_product(ch.model.p_th_cool_vars, ch.model.p_th_cool_vars)
obj += 2 * 3 * pyomo.sum_product(ch.model.p_th_cool_vars)
m.o = pyomo.Objective(expr=obj)
solve_model(m)
ch.update_schedule()
assert_equal_array(ch.p_th_cool_schedule[:4], [-5] * 4)
return
class TestCurtailableLoad(unittest.TestCase):
combinations = [(4, 1), (3, 1), (2, 1), (1, 1),
(1, 3), (1, 4), (2, 2), (2, 3),
(0, 1), (0, 2), (0, 3), (0, 4)]
horizon = 5
def setUp(self):
self.e = get_env(5, 20)
return
def test_populate_model(self):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
cl.update_schedule()
self.assertAlmostEqual(5, pyomo.value(obj))
self.assertTrue(5, sum(cl.p_el_schedule[:5]))
return
def test_populate_model_on_off(self):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, 2, 2)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
cl.update_schedule()
self.assertAlmostEqual(7, pyomo.value(obj))
self.assertAlmostEqual(7, sum(cl.p_el_schedule[:5]))
return
def test_populate_model_integer(self):
for low, full in self.combinations:
min_states = sum(np.tile([False]*low + [True]*full, 5)[:5])
for nom in [0.5, 1, 2]:
with self.subTest(msg="max_low={} min_full={} nom={}".format(low, full, nom)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, nom, 0.75, low, full)
cl.populate_model(model, mode="integer")
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
results = solve_model(model)
cl.update_schedule()
schedule_states = np.isclose(cl.p_el_schedule[:5], [nom]*5)
assert_equal_array(cl.p_state_schedule[:5], schedule_states)
self.assertEqual(min_states, sum(schedule_states))
self.assertAlmostEqual(min_states*nom+(5-min_states)*nom*0.75, pyomo.value(obj))
return
def test_update_model(self):
for width in [1, 2, 4, 5]:
with self.subTest(msg="step width={}".format(width)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
for t in range(0, 20-5+1, width):
self.e.timer.current_timestep = t
cl.update_model()
solve_model(model)
cl.update_schedule()
self.assertAlmostEqual(5, pyomo.value(obj))
self.assertAlmostEqual(5, sum(cl.p_el_schedule[t:t+5]))
return
def test_update_model_on_off(self):
for low, full in self.combinations:
for width in [1, 2, 4, 5]:
with self.subTest(msg="max_low={} min_full={} step width={}".format(low, full, width)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, low, full)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
solve_model(model)
for t in range(0, 20-5+1, width):
self.e.timer.current_timestep = t
cl.update_model()
solve_model(model)
cl.update_schedule()
endtimestep = self.e.timer.current_timestep + cl.op_horizon
for t in range(0, endtimestep):
self.assertGreaterEqual(cl.p_el_schedule[t], 1)
self.assertLessEqual(cl.p_el_schedule[t], 2)
for t in range(0, endtimestep-(low+full)+1):
self.assertGreaterEqual(sum(cl.p_el_schedule[t:t+low+full]) + 1e-4, 1*low + 2*full)
return
def test_update_model_integer(self):
for low, full in self.combinations:
states = np.tile([False] * low + [True] * full, 20)[:20]
for width in [1, 2, 4, 5]:
with self.subTest(msg="max_low={} min_full={} step width={}".format(low, full, width)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, low, full)
cl.populate_model(model, mode="integer")
obj = pyomo.sum_product(cl.model.p_el_vars)
for t in range(0, 20-5+1, width):
self.e.timer.current_timestep = t
cl.update_model(mode="integer")
model.o = pyomo.Objective(expr=obj)
results = solve_model(model)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
best_obj = pyomo.value(obj)
model.o_constr = pyomo.Constraint(expr=best_obj == obj)
model.del_component("o")
model.o = pyomo.Objective(expr=pyomo.sum_product(range(0, -cl.op_horizon, -1),
cl.model.p_el_vars))
results = solve_model(model)
model.del_component("o")
model.del_component("o_constr")
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
cl.update_schedule()
schedule_states_el = np.isclose(cl.p_el_schedule[t:t+5], [2] * 5)
schedule_states_b = np.isclose(cl.p_state_schedule[t:t+5], [1] * 5)
assert_equal_array(schedule_states_b, states[t:t + 5])
assert_equal_array(schedule_states_el, schedule_states_b)
assert_equal_array(
cl.p_el_schedule[t:t+5],
np.full(5, 2 * 0.5) + np.array(states[t:t+5]) * (2 * (1. - 0.5))
)
return
def test_integer_first(self):
for low, full in self.combinations:
if low > 0:
with self.subTest(msg="max_low={} min_full={}".format(low, full)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(self.e, 2, 0.5, low, full)
cl.populate_model(model, mode="integer")
self.e.timer.current_timestep = 1
cl.p_state_schedule[0] = False
cl.p_el_schedule[0] = 1
cl.update_model("integer")
cl.model.p_state_vars[0].setub(1.0)
cl.model.p_state_vars[0].setlb(1.0)
cl.model.p_state_vars[1].setub(0.0)
cl.model.p_state_vars[1].setlb(0.0)
model.o = pyomo.Objective(expr=cl.model.p_state_vars[0])
logger = logging.getLogger("pyomo.core")
oldlevel = logger.level
logger.setLevel(logging.ERROR)
results = solve_model(model)
logger.setLevel(oldlevel)
if full > 1:
self.assertEqual(results.solver.termination_condition, TerminationCondition.infeasible)
else:
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
return
def test_small_horizon(self):
for width in [1, 2, 4]:
for horizon in [1, 2, 4]:
if horizon >= width:
with self.subTest(msg="width={} horizon={}".format(width, horizon)):
e = get_env(horizon, 20)
model = pyomo.ConcreteModel()
cl = CurtailableLoad(e, 2, 0.5)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
for t in range(0, 21 - horizon, width):
e.timer.current_timestep = t
cl.update_model()
solve_model(model)
self.assertEqual(1, pyomo.value(cl.model.p_el_vars[0]))
cl.update_schedule()
assert_equal_array(cl.p_el_schedule, [1] * 20)
return
def test_small_horizon_low_full(self):
for horizon in [1, 2, 4]:
e = get_env(horizon, 20)
for width in [1, 2, 4]:
if horizon >= width:
for low, full in self.combinations:
with self.subTest(msg="width={} horizon={} max_low={} min_full={}"
.format(width, horizon, low, full)):
model = pyomo.ConcreteModel()
cl = CurtailableLoad(e, 2, 0.5, low, full)
cl.populate_model(model)
obj = pyomo.sum_product(cl.model.p_el_vars)
model.c = pyomo.Objective(expr=obj)
for t in range(0, 21 - horizon, width):
e.timer.current_timestep = t
cl.update_model()
solve_model(model)
cl.update_schedule()
for t in range(0, 20 - (low + full) + 1):
self.assertGreaterEqual(sum(cl.p_el_schedule[t:t + low + full]) + 1e-4,
1 * low + 2 * full,
np.array2string(cl.p_el_schedule))
return
def test_small_horizon_low_full_integer(self):
for horizon in [1, 2, 4]:
e = get_env(horizon, 20)
for width in [1, 2, 4]:
if horizon >= width:
for low, full in self.combinations:
with self.subTest(msg="width={} horizon={} max_low={} min_full={}".format(width, horizon, low, full)):
states = np.tile([1] * low + [2] * full, 20)[:20]
model = pyomo.ConcreteModel()
cl = CurtailableLoad(e, 2, 0.5, low, full)
cl.populate_model(model, mode="integer")
obj = pyomo.sum_product(cl.model.p_el_vars)
for t in range(0, 21 - horizon, width):
e.timer.current_timestep = t
cl.update_model(mode="integer")
model.o = pyomo.Objective(expr=obj)
results = solve_model(model)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
best_obj = pyomo.value(obj)
model.o_constr = pyomo.Constraint(expr=best_obj == obj)
model.del_component("o")
model.o = pyomo.Objective(expr=pyomo.sum_product(range(-1, -cl.op_horizon-1, -1),
cl.model.p_el_vars))
results = solve_model(model)
model.del_component("o")
model.del_component("o_constr")
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
cl.update_schedule()
assert_equal_array(cl.p_el_schedule, states)
return
class TestCityDistrict(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.cd = CityDistrict(e)
return
def test_get_objective(self):
m = pyomo.ConcreteModel()
self.cd.populate_model(m)
def zero_constr(model, t):
return model.p_el_vars[t] == 0
self.cd.model.extra_constr = pyomo.Constraint(self.cd.model.t, rule=zero_constr)
m.o = pyomo.Objective(expr=self.cd.get_objective())
solve_model(m)
for t in range(4):
self.cd.model.p_el_vars[t].value = t
self.assertEqual(self.cd.objective, "price")
self.cd.environment.prices.da_prices = np.array([1]*2 + [4]*6)
self.assertAlmostEqual(8.4, pyomo.value(self.cd.get_objective()))
self.cd.objective = 'peak-shaving'
self.assertAlmostEqual(14, pyomo.value(self.cd.get_objective()))
self.cd.objective = 'valley-filling'
self.cd.valley_profile = np.array([-1]*8)
self.assertAlmostEqual(2, pyomo.value(self.cd.get_objective()))
self.cd.objective = None
self.assertAlmostEqual(0, pyomo.value(self.cd.get_objective()))
self.cd.objective = "invalid"
self.assertRaisesRegex(ValueError, ".*CityDistrict.*", self.cd.get_objective)
m = pyomo.ConcreteModel()
self.cd.objective = "max-consumption"
self.cd.populate_model(m)
self.cd.model.p_el_vars[0].setub(-1)
m.o = pyomo.Objective(expr=self.cd.get_objective())
solve_model(m)
self.assertAlmostEqual(1, pyomo.value(self.cd.get_objective()))
return
def test_calculate_costs(self):
self.cd.p_el_schedule = np.array([10]*4 + [-20]*4)
self.cd.p_el_ref_schedule = np.array([4]*4 + [-4]*4)
prices = np.array([10]*4 + [20]*4)
costs = calculate_costs(self.cd, prices=prices, feedin_factor=0.5)
self.assertEqual(-100, costs)
costs = calculate_costs(self.cd, timestep=4, prices=prices)
self.assertEqual(100, costs)
self.cd.load_schedule("ref")
costs = calculate_costs(self.cd, prices=prices)
self.assertEqual(-40, costs)
return
def test_calculate_co2(self):
pv = Photovoltaic(self.cd.environment, 0)
self.cd.addEntity(pv, Point(0, 0))
self.cd.p_el_schedule = np.array([-5] * 2 + [5] * 4 + [-5] * 2)
self.cd.p_el_ref_schedule = np.array([-2] * 2 + [2] * 4 + [-2] * 2)
pv.p_el_schedule = - np.array([10] * 8)
pv.p_el_ref_schedule = - np.array([4] * 8)
co2_em = np.array([100] * 4 + [400] * 4)
co2 = calculate_co2(self.cd, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_PV+1250.0, co2)
co2 = calculate_co2(self.cd, timestep=4, co2_emissions=co2_em)
self.assertEqual(10.0*constants.CO2_EMISSIONS_PV+250.0, co2)
self.cd.load_schedule("ref")
co2 = calculate_co2(self.cd, co2_emissions=co2_em)
self.assertEqual(8.0*constants.CO2_EMISSIONS_PV+500.0, co2)
return
def test_self_consumption(self):
pv = Photovoltaic(self.cd.environment, 0)
self.cd.addEntity(pv, Point(0, 0))
self.cd.p_el_schedule = np.array([4]*2 + [-4]*2 + [-10]*2 + [-2]*2)
self.cd.p_el_ref_schedule = np.array([2]*2 + [-6]*2 + [-9]*2 + [-1]*2)
pv.p_el_schedule = - np.array([0]*2 + [8]*4 + [0]*2)
pv.p_el_ref_schedule = - np.array([0]*8)
self.assertEqual(0.25, self_consumption(self.cd))
self.assertEqual(0.5, self_consumption(self.cd, timestep=4))
self.cd.load_schedule("ref")
self.assertEqual(1, self_consumption(self.cd))
return
def test_calculate_adj_costs(self):
self.cd.p_el_schedule = np.array([4] * 2 + [-4] * 2 + [-10] * 2 + [-2] * 2)
self.cd.p_el_ref_schedule = np.array([2] * 2 + [-6] * 2 + [-9] * 2 + [-1] * 2)
prices = np.array([10] * 4 + [20] * 4)
costs_adj = calculate_adj_costs(self.cd, "ref", prices=prices)
self.assertEqual(2*5+2*5+1*10+1*10, costs_adj)
costs_adj = calculate_adj_costs(self.cd, "ref", prices=prices, total_adjustments=False)
self.assertEqual(20, costs_adj)
self.cd.copy_schedule("ref")
costs_adj = calculate_adj_costs(self.cd, "ref", prices=prices)
self.assertEqual(0, costs_adj)
return
def test_autarky(self):
pv = Photovoltaic(self.cd.environment, 0)
self.cd.addEntity(pv, Point(0, 0))
self.cd.p_el_schedule = np.array([4]*2 + [-4]*2 + [-10]*2 + [-2]*2)
self.cd.p_el_ref_schedule = - np.array([0]*2 + [8]*4 + [0]*2)
pv.p_el_schedule = - np.array([0]*2 + [8]*4 + [0]*2)
pv.p_el_ref_schedule = - np.array([0]*2 + [8]*4 + [0]*2)
self.assertEqual(0.5, autarky(self.cd))
self.assertEqual(0, autarky(self.cd, timestep=2))
self.cd.load_schedule("ref")
self.assertEqual(1, autarky(self.cd))
return
class TestCombinedHeatPower(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.chp = CombinedHeatPower(e, 10, 10, 0.8)
return
def test_calculate_co2(self):
self.chp.p_th_heat_schedule = - np.array([10] * 8)
self.chp.p_th_heat_ref_schedule = - np.array([4] * 8)
co2_em = np.array([1111]*8)
co2 = calculate_co2(self.chp, co2_emissions=co2_em)
self.assertEqual(50.0*constants.CO2_EMISSIONS_GAS, co2)
co2 = calculate_co2(self.chp, timestep=4, co2_emissions=co2_em)
self.assertEqual(25.0*constants.CO2_EMISSIONS_GAS, co2)
self.chp.load_schedule("ref")
co2 = calculate_co2(self.chp, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_GAS, co2)
return
def test_lower_activation(self):
e = get_env(4, 8)
chp = CombinedHeatPower(e, 10, 10, 0.8, 0.5)
m = pyomo.ConcreteModel()
chp.populate_model(m, "integer")
chp.update_model("integer")
obj = pyomo.sum_product(chp.model.p_el_vars, chp.model.p_el_vars)
obj += 2*3 * pyomo.sum_product(chp.model.p_el_vars)
m.o = pyomo.Objective(expr=obj)
solve_model(m)
chp.update_schedule()
assert_equal_array(chp.p_el_schedule[:4], [-5]*4)
return
def test_bounds(self):
e = get_env(8, 8)
chp = CombinedHeatPower(e, 10, None, 0.8, 0.5)
m = pyomo.ConcreteModel()
chp.populate_model(m)
chp.update_model()
for t in range(8):
self.assertEqual(0, chp.model.p_el_vars[t].ub)
self.assertEqual(0, chp.model.p_th_heat_vars[t].ub)
self.assertEqual(-10, chp.model.p_el_vars[t].lb)
self.assertEqual(-10, chp.model.p_th_heat_vars[t].lb)
chp = CombinedHeatPower(e, 10, 5, 0.8, 0.5)
m = pyomo.ConcreteModel()
chp.populate_model(m)
chp.update_model()
for t in range(8):
self.assertEqual(0, chp.model.p_el_vars[t].ub)
self.assertEqual(0, chp.model.p_th_heat_vars[t].ub)
self.assertEqual(-5, chp.model.p_el_vars[t].lb)
self.assertEqual(-10, chp.model.p_th_heat_vars[t].lb)
return
class TestDeferrableLoad(unittest.TestCase):
def setUp(self):
self.e = get_env(6, 9)
self.lt = [0, 1, 1, 1, 0, 1, 1, 1, 0]
return
def test_update_model(self):
with self.assertWarns(UserWarning):
dl = DeferrableLoad(self.e, 19, 10, load_time=self.lt)
model = pyomo.ConcreteModel()
dl.populate_model(model)
obj = pyomo.sum_product(dl.model.p_el_vars, dl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
dl.update_model()
solve_model(model)
self.assertAlmostEqual(10, pyomo.value(pyomo.sum_product(dl.model.p_el_vars)) * dl.time_slot, places=5)
dl.timer.mpc_update()
dl.update_model()
solve_model(model)
for t, c in enumerate(self.lt[1:7]):
if c == 1:
self.assertEqual(19, dl.model.p_el_vars[t].ub)
else:
self.assertEqual(0, dl.model.p_el_vars[t].ub)
dl.update_schedule()
assert_equal_array(dl.p_el_schedule[:7], [0, 8, 8, 8, 0, 8, 8])
assert_equal_array(dl.p_start_schedule[:7], [False, True, False, False, False, False, False])
return
def test_infeasible_consumption(self):
with self.assertWarns(UserWarning):
feasible = DeferrableLoad(self.e, 10, 10, load_time=self.lt)
m = pyomo.ConcreteModel()
feasible.populate_model(m)
feasible.update_model()
obj = pyomo.sum_product(feasible.model.p_el_vars)
m.o = pyomo.Objective(expr=obj)
results = solve_model(m)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
m = pyomo.ConcreteModel()
with self.assertWarns(UserWarning):
infeasible = DeferrableLoad(self.e, 10, 10.6, load_time=self.lt)
infeasible.populate_model(m)
infeasible.update_model()
obj = pyomo.sum_product(infeasible.model.p_el_vars)
m.o = pyomo.Objective(expr=obj)
logger = logging.getLogger("pyomo.core")
oldlevel = logger.level
logger.setLevel(logging.ERROR)
results = solve_model(m)
logger.setLevel(oldlevel)
self.assertEqual(results.solver.termination_condition, TerminationCondition.infeasible)
return
def test_update_model_integer(self):
with self.assertWarns(UserWarning):
dl = DeferrableLoad(self.e, 19, 9.5, load_time=self.lt)
m = pyomo.ConcreteModel()
dl.populate_model(m, mode="integer")
obj = pyomo.sum_product([0] * 2 + [1] * 2 + [0] * 2, dl.model.p_el_vars, dl.model.p_el_vars)
m.o = pyomo.Objective(expr=obj)
with self.assertWarns(UserWarning):
dl.update_model(mode="integer")
results = solve_model(m)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
dl.update_schedule()
assert_equal_array(np.rint(dl.p_el_schedule[:6]), [0, 19, 19, 0, 0, 0])
for t in range(3):
dl.timer.mpc_update()
if t == 0:
with self.assertWarns(UserWarning):
dl.update_model(mode="integer")
else:
dl.update_model(mode="integer")
results = solve_model(m)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
dl.update_schedule()
assert_equal_array(dl.p_el_schedule, [0, 19, 19, 0, 0, 0, 19, 19, 0])
return
def test_infeasible_integer(self):
e = get_env(1, 9)
model = pyomo.ConcreteModel()
with self.assertWarns(UserWarning):
dl = DeferrableLoad(e, 19, 9.5, load_time=self.lt)
dl.populate_model(model, mode="integer")
dl.update_model(mode="integer")
obj = pyomo.sum_product(dl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
logger = logging.getLogger("pyomo.core")
oldlevel = logger.level
logger.setLevel(logging.ERROR)
results = solve_model(model)
logger.setLevel(oldlevel)
self.assertEqual(results.solver.termination_condition, TerminationCondition.infeasible)
model = pyomo.ConcreteModel()
with self.assertWarns(UserWarning):
dl = DeferrableLoad(self.e, 19, 19, load_time=self.lt)
dl.populate_model(model, mode="integer")
dl.update_model(mode="integer")
obj = pyomo.sum_product(dl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
logger = logging.getLogger("pyomo.core")
oldlevel = logger.level
logger.setLevel(logging.ERROR)
results = solve_model(model)
logger.setLevel(oldlevel)
self.assertEqual(results.solver.termination_condition, TerminationCondition.infeasible)
model = pyomo.ConcreteModel()
with self.assertWarns(UserWarning):
dl = DeferrableLoad(self.e, 19, 19*3/4, load_time=self.lt)
dl.populate_model(model, mode="integer")
dl.update_model(mode="integer")
obj = pyomo.sum_product(dl.model.p_el_vars)
model.o = pyomo.Objective(expr=obj)
results = solve_model(model)
self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal)
dl.update_schedule()
assert_equal_array(dl.p_el_schedule[:6], [0, 19, 19, 19, 0, 0])
return
def test_objective(self):
with self.assertWarns(UserWarning):
dl = DeferrableLoad(self.e, 19, 19, load_time=self.lt)
model = pyomo.ConcreteModel()
dl.populate_model(model)
dl.get_objective()
return
def test_update_integer(self):
e = get_env(9, 9)
model = pyomo.ConcreteModel()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
dl = DeferrableLoad(e, 19, 19, load_time=[1] * 9)
dl.populate_model(model, "integer")
dl.update_model("integer")
self.assertEqual(0, len(w))
return
class TestFixedLoad(unittest.TestCase):
def test_populate_model(self):
e = get_env(2, 4)
load = np.arange(1, 5)
model = pyomo.ConcreteModel()
self.fl = FixedLoad(e, method=0, demand=load)
self.fl.populate_model(model)
self.fl.update_model()
model.o = pyomo.Objective(expr=pyomo.sum_product(self.fl.model.p_el_vars))
solve_model(model)
for t in range(2):
self.assertEqual(self.fl.model.p_el_vars[t].value, load[t])
return
def test_unit_conversion(self):
ti = Timer(step_size=1800,
op_horizon=48,
mpc_horizon=24*365,
mpc_step_width=1)
we = Weather(ti)
pr = Prices(ti)
e = Environment(ti, we, pr)
fl = FixedLoad(e, method=1, annual_demand=25)
# loadcurve in Wh
self.assertEqual(48 * 365, len(fl.loadcurve))
self.assertAlmostEqual(25*1000, sum(fl.loadcurve)*0.5, places=5)
# p_el_schedule in kWh
self.assertEqual(24*365, len(fl.p_el_schedule))
self.assertAlmostEqual(25/2, sum(fl.p_el_schedule)*0.5, delta=25/2/10)
return
class TestElectricalEntity(unittest.TestCase):
def setUp(self):
e = get_env(4, 8, 4)
self.ee = ElectricalEntity(e)
self.ee.environment = e
return
def test_update_schedule(self):
m = pyomo.ConcreteModel()
self.ee.populate_model(m)
for t in range(4):
self.ee.model.p_el_vars[t].value = t
a = np.arange(4)
self.ee.update_schedule()
assert_equal_array(self.ee.p_el_schedule[:4], a)
self.ee.timer.mpc_update()
self.ee.update_schedule()
assert_equal_array(self.ee.p_el_schedule[4:], a)
return
def test_calculate_costs(self):
self.ee.p_el_schedule = np.array([10]*4 + [-20]*4)
self.ee.p_el_ref_schedule = np.array([4]*4 + [-4]*4)
prices = np.array([10]*4 + [20]*4)
costs = calculate_costs(self.ee, prices=prices, feedin_factor=0.5)
self.assertEqual(-100, costs)
costs = calculate_costs(self.ee, timestep=4, prices=prices)
self.assertEqual(100, costs)
self.ee.load_schedule("ref")
costs = calculate_costs(self.ee, prices=prices)
self.assertEqual(40, costs)
return
def test_calculate_adj_costs(self):
self.ee.p_el_schedule = np.array([10] * 4 + [-20] * 4)
self.ee.p_el_ref_schedule = np.array([4] * 4 + [-4] * 4)
prices = np.array([10] * 4 + [20] * 4)
costs_adj = calculate_adj_costs(self.ee, "ref", prices=prices)
self.assertEqual(6*10 + 16*20, costs_adj)
costs_adj = calculate_adj_costs(self.ee, "ref", prices=prices, total_adjustments=False)
self.assertEqual(16 * 20, costs_adj)
self.ee.copy_schedule("ref")
costs_adj = calculate_adj_costs(self.ee, "ref", prices=prices)
self.assertEqual(0, costs_adj)
return
def test_calculate_adj_power(self):
self.ee.p_el_schedule = np.array([10] * 4 + [-20] * 4)
self.ee.p_el_ref_schedule = np.array([4] * 4 + [-4] * 4)
adj_power = calculate_adj_power(self.ee, "ref")
assert_equal_array(adj_power, [6] * 4 + [16] * 4)
adj_power = calculate_adj_power(self.ee, "ref", total_adjustments=False)
assert_equal_array(adj_power, [0] * 4 + [16] * 4)
adj_power = calculate_adj_power(self.ee, "default")
assert_equal_array(adj_power, [0] * 8)
self.ee.load_schedule("ref")
adj_power = calculate_adj_power(self.ee, "ref")
assert_equal_array(adj_power, [0] * 8)
self.ee.copy_schedule("default")
adj_power = calculate_adj_power(self.ee, "default")
assert_equal_array(adj_power, [0] * 8)
return
def test_calculate_adj_energy(self):
self.ee.p_el_schedule = np.array([10] * 4 + [-20] * 4)
self.ee.p_el_ref_schedule = np.array([4] * 4 + [-4] * 4)
adj_energy = calculate_adj_energy(self.ee, "ref")
self.assertEqual(6 + 16, adj_energy)
adj_energy = calculate_adj_energy(self.ee, "ref", total_adjustments=False)
self.assertEqual(16, adj_energy)
adj_energy = calculate_adj_energy(self.ee, "default")
self.assertEqual(0, adj_energy)
self.ee.copy_schedule(src="ref")
adj_energy = calculate_adj_energy(self.ee, "ref")
self.assertEqual(0, adj_energy)
adj_energy = calculate_adj_energy(self.ee, "ref", total_adjustments=False)
self.assertEqual(0, adj_energy)
self.ee.load_schedule("ref")
adj_energy = calculate_adj_energy(self.ee, "ref")
self.assertEqual(0, adj_energy)
adj_energy = calculate_adj_energy(self.ee, "default")
self.assertEqual(0, adj_energy)
return
def test_metric_delta_g(self):
self.ee.p_el_schedule = np.array([10] * 4 + [-20] * 4)
self.ee.p_el_ref_schedule = np.array([4] * 4 + [-4] * 4)
g = metric_delta_g(self.ee, "ref")
self.assertEqual(1-30/8, g)
g = metric_delta_g(self.ee, "default")
self.assertEqual(0, g)
return
def test_peak_to_average_ratio(self):
self.ee.p_el_schedule = np.array([10] * 4 + [-20] * 4)
self.ee.p_el_ref_schedule = np.array([4] * 4 + [-4] * 4)
ratio = peak_to_average_ratio(self.ee)
self.assertEqual(20/5, ratio)
self.ee.load_schedule("ref")
with self.assertWarns(RuntimeWarning):
ratio = peak_to_average_ratio(self.ee)
self.assertEqual(np.inf, ratio)
return
def test_peak_reduction_ratio(self):
self.ee.p_el_schedule = np.array([10] * 4 + [-20] * 4)
self.ee.p_el_ref_schedule = np.array([4] * 4 + [-4] * 4)
ratio = peak_reduction_ratio(self.ee, "ref")
self.assertEqual((20-4)/4, ratio)
self.ee.p_el_ref_schedule = np.array([4] * 8)
ratio = peak_reduction_ratio(self.ee, "ref")
self.assertEqual((20-4)/4, ratio)
ratio = peak_reduction_ratio(self.ee, "default")
self.assertEqual(0, ratio)
self.ee.load_schedule("ref")
ratio = peak_reduction_ratio(self.ee, "ref")
self.assertEqual(0, ratio)
return
def test_self_consumption(self):
# properly tested in CityDistrict
self.ee.p_el_schedule = np.array([10]*4 + [-20]*4)
self.assertEqual(0, self_consumption(self.ee))
return
def test_autarky(self):
# properly tested in CityDistrict
self.ee.p_el_schedule = np.array([10]*4 + [-20]*4)
self.assertEqual(0, autarky(self.ee))
return
def test_objective(self):
model = pyomo.ConcreteModel()
self.ee.populate_model(model)
self.ee.get_objective()
return
def test_new_objective(self):
model = pyomo.ConcreteModel()
self.ee.populate_model(model)
for t in range(4):
self.ee.model.p_el_vars[t].setlb(t)
self.ee.model.p_el_vars[t].setub(t)
self.ee.set_objective("peak-shaving")
obj = self.ee.get_objective()
model.o = pyomo.Objective(expr=obj)
solve_model(model)
obj = self.ee.get_objective()
self.assertEqual(sum(t**2 for t in range(4)), pyomo.value(obj))
self.ee.set_objective("max-consumption")
with self.assertRaises(ValueError):
obj = self.ee.get_objective()
model = pyomo.ConcreteModel()
self.ee.populate_model(model)
for t in range(4):
self.ee.model.p_el_vars[t].setlb(t)
self.ee.model.p_el_vars[t].setub(t)
obj = self.ee.get_objective()
model.o = pyomo.Objective(expr=obj)
solve_model(model)
self.assertAlmostEqual(3, pyomo.value(obj), 4)
return
class TestElectricalHeater(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.eh = ElectricalHeater(e, 10, 10, 0.8)
return
def test_lower_activation(self):
e = get_env(4, 8)
eh = ElectricalHeater(e, 10, lower_activation_limit=0.5)
m = pyomo.ConcreteModel()
eh.populate_model(m, "integer")
eh.update_model("integer")
obj = pyomo.sum_product(eh.model.p_el_vars, eh.model.p_el_vars)
obj += -2 * 3 * pyomo.sum_product(eh.model.p_el_vars)
m.o = pyomo.Objective(expr=obj)
solve_model(m)
eh.update_schedule()
assert_equal_array(eh.p_el_schedule[:4], [5] * 4)
return
def test_update_schedule(self):
e = get_env(2, 2)
eh = ElectricalHeater(e, 10, lower_activation_limit=0.5)
m = pyomo.ConcreteModel()
eh.populate_model(m)
eh.update_model()
obj = eh.model.p_el_vars[0] - eh.model.p_el_vars[1]
eh.model.p_el_vars[0].setlb(5.0)
eh.model.p_el_vars[1].setub(0.05)
m.o = pyomo.Objective(expr=obj)
solve_model(m)
eh.update_schedule()
assert_equal_array(eh.p_el_schedule, [5, 0.05])
assert_equal_array(eh.p_th_heat_schedule, [-5, -0.05])
assert_equal_array(eh.p_th_heat_state_schedule, [True, False])
return
class TestElectricVehicle(unittest.TestCase):
def setUp(self):
e = get_env(6, 9)
self.ct = [1, 1, 1, 0, 0, 0, 1, 1, 1]
self.ev = ElectricalVehicle(e, 10, 20, p_el_max_discharge=20, soc_init=0.5, charging_time=self.ct)
return
def test_populate_model(self):
model = pyomo.ConcreteModel()
self.ev.populate_model(model)
model.c1 = pyomo.Constraint(expr=self.ev.model.e_el_vars[2] == 10)
model.c2 = pyomo.Constraint(expr=self.ev.model.e_el_vars[0] == 5)
obj = pyomo.sum_product(self.ev.model.p_el_demand_vars, self.ev.model.p_el_demand_vars)
model.o = pyomo.Objective(expr=obj)
result = solve_model(model)
self.assertEqual(31, result.Problem[0].number_of_variables)
var_sum = pyomo.value(pyomo.quicksum(self.ev.model.p_el_vars[t] for t in range(1, 6)))
self.assertAlmostEqual(20, var_sum, places=2)
var_sum = pyomo.value(pyomo.quicksum(
self.ev.model.p_el_supply_vars[t] + self.ev.model.p_el_demand_vars[t] for t in range(1, 6)))
self.assertAlmostEqual(20, var_sum, places=2)
return
def test_update_model(self):
model = pyomo.ConcreteModel()
self.ev.populate_model(model)
self.ev.update_model()
model.o = pyomo.Objective(expr=self.ev.get_objective())
solve_model(model)
self.assertAlmostEqual(10, self.ev.model.e_el_vars[2].value, places=5)
self.assertAlmostEqual(2, self.ev.model.e_el_vars[3].value, places=5)
self.ev.timer.mpc_update()
self.ev.update_model()
solve_model(model)
for t, c in enumerate(self.ct[1:7]):
if c:
self.assertEqual(20, self.ev.model.p_el_demand_vars[t].ub)
self.assertEqual(20, self.ev.model.p_el_supply_vars[t].ub)
self.assertEqual(0, self.ev.model.p_el_drive_vars[t].ub)
else:
self.assertEqual(0, self.ev.model.p_el_demand_vars[t].ub)
self.assertEqual(0, self.ev.model.p_el_supply_vars[t].ub)
self.assertIsNone(self.ev.model.p_el_drive_vars[t].ub)
self.assertAlmostEqual(10, self.ev.model.e_el_vars[1].value, places=5)
self.assertAlmostEqual(2, self.ev.model.e_el_vars[2].value, places=5)
self.assertLessEqual(1.6, self.ev.model.e_el_vars[5].value)
self.ev.update_schedule()
self.ev.timer.mpc_update()
self.ev.timer.mpc_update()
self.ev.update_model()
solve_model(model)
self.assertAlmostEqual(10, self.ev.model.e_el_vars[5].value, places=5)
return
def test_get_objective(self):
model = pyomo.ConcreteModel()
self.ev.populate_model(model)
self.ev.update_model()
obj = self.ev.get_objective(11)
for i in range(6):
ref = (i + 1) / 21 * 6 * 11
coeff = obj.args[i].args[0].args[0]
self.assertAlmostEqual(ref, coeff, places=5)
return
def test_no_charge_time(self):
e = get_env(6, 9)
ev = ElectricalVehicle(e, 37.0, 11.0)
assert_equal_array(ev.charging_time, [1]*9)
e = get_env(28, 96*24-12)
ev = ElectricalVehicle(e, 37.0, 11.0)
assert_equal_array(ev.charging_time, np.tile([1] * 24 + [0] * 48 + [1] * 24, 24)[:-12])
return
def test_no_discharge(self):
model = pyomo.ConcreteModel()
e = get_env(6, 9)
ev = ElectricalVehicle(e, 10.0, 40.0, charging_time=self.ct)
ev.populate_model(model)
ev.update_model()
model.o = pyomo.Objective(expr=ev.model.p_el_vars[0] + ev.model.p_el_vars[1])
solve_model(model)
ev.update_schedule()
assert_equal_array(ev.p_el_schedule[:4], [0, 0, 5*4, 0])
assert_equal_array(ev.p_el_demand_schedule[:4], [0, 0, 5 * 4, 0])
assert_equal_array(ev.p_el_supply_schedule[:4], [0, 0, 0, 0])
assert_equal_array(ev.e_el_schedule[:4], [5, 5, 10, 2])
model = pyomo.ConcreteModel()
e = get_env(6, 9)
ev = ElectricalVehicle(e, 10.0, 40.0, p_el_max_discharge=8, charging_time=self.ct)
ev.populate_model(model)
ev.update_model()
model.o = pyomo.Objective(expr=ev.model.p_el_vars[0] + ev.model.p_el_vars[1])
solve_model(model)
ev.update_schedule()
assert_equal_array(ev.p_el_schedule[:4], [-8, -8, 9 * 4, 0])
assert_equal_array(ev.p_el_demand_schedule[:4], [0, 0, 9 * 4, 0])
assert_equal_array(ev.p_el_supply_schedule[:4], [8, 8, 0, 0])
assert_equal_array(ev.e_el_schedule[:4], [3, 1, 10, 2])
return
def test_partial_charge(self):
for step_size in [1, 2, 3, 6, 12]:
with self.subTest("step_size: {}".format(step_size)):
e = get_env(step_size, 12, step_size)
self.ct = [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0]
self.ev = ElectricalVehicle(e, 10, 20, 0.5, charging_time=self.ct)
m = pyomo.ConcreteModel()
self.ev.populate_model(m)
m.o = pyomo.Objective(expr=pyomo.sum_product(
self.ev.model.p_el_vars,
self.ev.model.p_el_vars
))
for i in range(0, 12, step_size):
self.ev.update_model(m)
solve_model(m)
self.ev.update_schedule()
e.timer.mpc_update()
assert_equal_array(self.ev.p_el_schedule, [5 / 3 * 4] * 3 + [0] * 3 + [8 / 3 * 4] * 3 + [0] * 3)
step_size = 12
e = get_env(step_size, 12, step_size)
self.ev = ElectricalVehicle(e, 10, 20, 20, soc_init=0.5, charging_time=self.ct)
m = pyomo.ConcreteModel()
self.ev.populate_model(m)
self.ev.update_model(m)
m.o = pyomo.Objective(expr=self.ev.model.p_el_vars[2] + self.ev.model.p_el_vars[7])
solve_model(m)
self.ev.update_schedule()
self.assertAlmostEqual(0, self.ev.p_el_schedule[2], 4)
self.assertAlmostEqual(-2 * 4, self.ev.p_el_schedule[7], 4)
return
def test_bad_charging_times(self):
e = get_env(3, 12)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
self.ev = ElectricalVehicle(e, 10, 8, soc_init=0.5, charging_time=[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0])
self.assertEqual(0, len(w))
with self.assertWarns(UserWarning):
self.ev = ElectricalVehicle(e, 10, 8, soc_init=0.5, charging_time=[1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0])
with self.assertWarns(UserWarning):
self.ev = ElectricalVehicle(e, 10, 8, soc_init=0.5, charging_time=[1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0])
with self.assertWarns(UserWarning):
self.ev = ElectricalVehicle(e, 10, 8, soc_init=0.5, charging_time=[1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1])
return
    def test_initial_charging_times(self):
for step_size in [1, 2, 3, 6, 12]:
with self.subTest("step_size: {}".format(step_size)):
e = get_env(step_size, 12, step_size)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
self.ev = ElectricalVehicle(e, 10, 8, soc_init=0.8,
charging_time=[0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0])
self.assertEqual(0, len(w))
m = pyomo.ConcreteModel()
self.ev.populate_model(m)
m.o = pyomo.Objective(expr=pyomo.sum_product(self.ev.model.p_el_vars, self.ev.model.p_el_vars))
for i in range(0, 12, step_size):
self.ev.update_model(m)
solve_model(m)
self.ev.update_schedule()
e.timer.mpc_update()
assert_equal_array(self.ev.p_el_schedule, [0, 8] + [0] * 4 + [8] * 4 + [0] * 2)
assert_equal_array(self.ev.p_el_demand_schedule, [0, 8] + [0] * 4 + [8] * 4 + [0] * 2)
assert_equal_array(self.ev.p_el_supply_schedule, [0] * 12)
assert_equal_array(self.ev.e_el_schedule, [8, 10] + [2] * 4 + [4, 6, 8, 10] + [2] * 2)
return
class TestHeatPump(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.hp = HeatPump(e, 10, cop=np.full(8, 11))
return
def test_update_model(self):
m = pyomo.ConcreteModel()
self.hp.populate_model(m)
self.hp.update_model()
c = self.hp.model.p_coupl_constr[0]
f, l = pyomo.current.decompose_term(c.body)
self.assertTrue(f)
for coeff, value in l:
if value is self.hp.model.p_el_vars[0]:
self.assertEqual(11, coeff)
if value is self.hp.model.p_th_heat_vars[0]:
self.assertEqual(1, coeff)
if value is None:
self.assertEqual(0, coeff)
return
def test_lower_activation(self):
e = get_env(4, 8)
hp = HeatPump(e, 10, lower_activation_limit=0.5)
m = pyomo.ConcreteModel()
hp.populate_model(m, "integer")
hp.update_model("integer")
obj = pyomo.sum_product(hp.model.p_th_heat_vars, hp.model.p_th_heat_vars)
obj += 2 * 3 * pyomo.sum_product(hp.model.p_th_heat_vars)
m.o = pyomo.Objective(expr=obj)
solve_model(m)
hp.update_schedule()
assert_equal_array(hp.p_th_heat_schedule[:4], [-5] * 4)
return
class TestPhotovoltaic(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.pv = Photovoltaic(e, 0, 30, 0.0, 0.3)
return
def test_calculate_co2(self):
self.pv.p_el_schedule = - np.array([10]*8)
self.pv.p_el_ref_schedule = - np.array([4]*8)
co2_em = np.array([1111]*8)
co2 = calculate_co2(self.pv, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_PV, co2)
co2 = calculate_co2(self.pv, timestep=4, co2_emissions=co2_em)
self.assertEqual(10.0*constants.CO2_EMISSIONS_PV, co2)
self.pv.load_schedule("ref")
co2 = calculate_co2(self.pv, co2_emissions=co2_em)
self.assertEqual(8*constants.CO2_EMISSIONS_PV, co2)
return
def test_objective(self):
model = pyomo.ConcreteModel()
self.pv.populate_model(model)
self.pv.get_objective()
return
class TestPrices(unittest.TestCase):
def test_cache(self):
Prices.co2_price_cache = None
Prices.da_price_cache = None
Prices.tou_price_cache = None
Prices.tou_price_cache_year = None
ti = Timer(op_horizon=4, mpc_horizon=8, step_size=3600,
initial_date=(2015, 1, 1), initial_time=(1, 0, 0))
pr = Prices(ti)
self.assertEqual(35040, len(pr.da_price_cache))
self.assertEqual(35040, len(pr.tou_price_cache))
self.assertEqual(35040, len(pr.co2_price_cache))
self.assertTrue(np.allclose(pr.tou_prices, [23.2621]*6 + [42.2947]*2))
Prices.da_price_cache[4] = 20
ti = Timer(op_horizon=4, mpc_horizon=8, step_size=900,
initial_date=(2015, 1, 1), initial_time=(1, 0, 0))
pr = Prices(ti)
self.assertAlmostEqual(20, pr.da_prices[0], places=4)
return
def test_unavailable_year(self):
ti = Timer(op_horizon=4, mpc_horizon=8, step_size=3600,
initial_date=(9999, 1, 1), initial_time=(1, 0, 0))
with self.assertWarnsRegex(UserWarning, "9999"):
Prices(ti)
return
class TestThermalCoolingStorage(unittest.TestCase):
def setUp(self):
e = get_env(3)
self.tcs = ThermalCoolingStorage(e, 40, 0.5)
return
def test_update_schedule(self):
m = pyomo.ConcreteModel()
self.tcs.populate_model(m)
self.tcs.update_model()
for t in range(3):
self.tcs.model.p_th_cool_vars[t].setub(t)
self.tcs.model.p_th_cool_vars[t].setlb(t)
m.o = pyomo.Objective(expr=pyomo.sum_product(self.tcs.model.p_th_cool_vars))
solve_model(m)
a = np.arange(3)
self.tcs.update_schedule()
assert_equal_array(self.tcs.p_th_cool_schedule, a)
assert_equal_array(self.tcs.e_th_cool_schedule, [20, 20.25, 20.75])
return
class TestThermalHeatingStorage(unittest.TestCase):
def setUp(self):
e = get_env(3)
self.ths = ThermalHeatingStorage(e, 40, 0.5)
return
def test_update_schedule(self):
m = pyomo.ConcreteModel()
self.ths.populate_model(m)
self.ths.update_model()
for t in range(3):
self.ths.model.p_th_heat_vars[t].setub(t)
self.ths.model.p_th_heat_vars[t].setlb(t)
m.o = pyomo.Objective(expr=pyomo.sum_product(self.ths.model.p_th_heat_vars))
solve_model(m)
a = np.arange(3)
self.ths.update_schedule()
assert_equal_array(self.ths.p_th_heat_schedule, a)
assert_equal_array(self.ths.e_th_heat_schedule, [20, 20.25, 20.75])
return
class TestThermalEntityCooling(unittest.TestCase):
def setUp(self):
e = get_env(4, 8, 4)
self.tc = ThermalEntityCooling(e)
self.tc.environment = e
return
def test_update_schedule(self):
m = pyomo.ConcreteModel()
self.tc.populate_model(m)
self.tc.update_model()
for t in range(4):
self.tc.model.p_th_cool_vars[t].setub(t)
self.tc.model.p_th_cool_vars[t].setlb(t)
m.o = pyomo.Objective(expr=pyomo.sum_product(self.tc.model.p_th_cool_vars))
solve_model(m)
a = np.arange(4)
self.tc.update_schedule()
assert_equal_array(self.tc.p_th_cool_schedule[:4], a)
self.tc.timer.mpc_update()
self.tc.update_schedule()
assert_equal_array(self.tc.p_th_cool_schedule[4:], a)
return
class TestThermalEntityHeating(unittest.TestCase):
def setUp(self):
e = get_env(4, 8, 4)
self.th = ThermalEntityHeating(e)
self.th.environment = e
return
def test_update_schedule(self):
m = pyomo.ConcreteModel()
self.th.populate_model(m)
self.th.update_model()
for t in range(4):
self.th.model.p_th_heat_vars[t].setub(t)
self.th.model.p_th_heat_vars[t].setlb(t)
m.o = pyomo.Objective(expr=pyomo.sum_product(self.th.model.p_th_heat_vars))
solve_model(m)
a = np.arange(4)
self.th.update_schedule()
assert_equal_array(self.th.p_th_heat_schedule[:4], a)
self.th.timer.mpc_update()
self.th.update_schedule()
assert_equal_array(self.th.p_th_heat_schedule[4:], a)
return
class TestSpaceCooling(unittest.TestCase):
def setUp(self):
e = get_env(2, 4)
self.load = np.arange(1, 5)
self.sc = SpaceCooling(e, method=0, loadcurve=self.load)
return
def test_model(self):
m = pyomo.ConcreteModel()
self.sc.populate_model(m)
self.sc.update_model()
m.o = pyomo.Objective(expr=self.sc.model.p_th_cool_vars[0]+self.sc.model.p_th_cool_vars[1])
r = solve_model(m)
assert_equal_array(self.sc.p_th_cool_schedule, self.load)
self.assertAlmostEqual(self.load[0], self.sc.model.p_th_cool_vars[0].value)
self.assertAlmostEqual(self.load[1], self.sc.model.p_th_cool_vars[1].value)
return
class TestSpaceHeating(unittest.TestCase):
def setUp(self):
e = get_env(2, 4)
self.load = np.arange(1, 5)
self.sh = SpaceHeating(e, method=0, loadcurve=self.load)
return
def test_model(self):
m = pyomo.ConcreteModel()
self.sh.populate_model(m)
self.sh.update_model()
m.o = pyomo.Objective(expr=self.sh.model.p_th_heat_vars[0]+self.sh.model.p_th_heat_vars[1])
r = solve_model(m)
assert_equal_array(self.sh.p_th_heat_schedule, self.load)
self.assertAlmostEqual(self.load[0], self.sh.model.p_th_heat_vars[0].value)
self.assertAlmostEqual(self.load[1], self.sh.model.p_th_heat_vars[1].value)
return
class TestTimer(unittest.TestCase):
def setUp(self):
self.timer = Timer(mpc_horizon=192, mpc_step_width=4,
initial_date=(2015, 1, 15), initial_time=(12, 0, 0))
self.timer._dt = datetime.datetime(2015, 1, 15, 13)
return
def test_time_in_year(self):
self.assertEqual(1396, self.timer.time_in_year())
self.assertEqual(1392, self.timer.time_in_year(from_init=True))
return
def test_time_in_week(self):
self.assertEqual(340, self.timer.time_in_week())
self.assertEqual(336, self.timer.time_in_week(from_init=True))
return
def test_time_in_day(self):
self.assertEqual(52, self.timer.time_in_day())
self.assertEqual(48, self.timer.time_in_day(from_init=True))
return
def test_more_than_one_year(self):
for s, h, horizon in [(s, h, horizon) for s in [300, 900, 1800, 3600]
for h in range(int(86400*365/s)-1, int(86400*365/s)+3)
for horizon in ["op_horizon", "mpc_horizon"]]:
year_horizon = int(86400 * 365 / s)
kwargs = {"step_size": s, "initial_date": (2015, 1, 15), "initial_time": (12, 0, 0)}
kwargs[horizon] = h
with self.subTest(msg="step_size={} horizon={} horizon_name={}".format(s, h, horizon)):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
t = Timer(**kwargs)
we = Weather(t)
if year_horizon < h:
self.assertEqual(len(w), 1, msg="No warning was thrown even though mpc_horizon / op_horizon {} "
"is larger than one year, which would be a horizon of {}"
.format(h, year_horizon))
self.assertIn(horizon, str(w[0].message))
self.assertEqual(len(we.p_ambient), year_horizon)
self.assertEqual(len(we.phi_ambient), year_horizon)
self.assertEqual(len(we.q_diffuse), year_horizon)
self.assertEqual(len(we.q_direct), year_horizon)
self.assertEqual(len(we.rad_earth), year_horizon)
self.assertEqual(len(we.rad_sky), year_horizon)
self.assertEqual(len(we.v_wind), year_horizon)
self.assertEqual(len(we.t_ambient), year_horizon)
self.assertEqual(len(we.current_p_ambient), year_horizon)
self.assertEqual(len(we.current_phi_ambient), year_horizon)
self.assertEqual(len(we.current_q_diffuse), year_horizon)
self.assertEqual(len(we.current_q_direct), year_horizon)
self.assertEqual(len(we.current_rad_earth), year_horizon)
self.assertEqual(len(we.current_rad_sky), year_horizon)
self.assertEqual(len(we.current_v_wind), year_horizon)
self.assertEqual(len(we.current_t_ambient), year_horizon)
else:
self.assertEqual(len(w), 0)
return
class TestWindEnergyConverter(unittest.TestCase):
def setUp(self):
e = get_env(4, 8)
self.wec = WindEnergyConverter(e, np.array([0, 10]), np.array([0, 10]))
return
def test_calculate_co2(self):
self.wec.p_el_schedule = - np.array([10] * 8)
self.wec.p_el_ref_schedule = - np.array([4] * 8)
co2_em = np.array([1111]*8)
co2 = calculate_co2(self.wec, co2_emissions=co2_em)
self.assertEqual(20.0*constants.CO2_EMISSIONS_WIND, co2)
co2 = calculate_co2(self.wec, timestep=4, co2_emissions=co2_em)
self.assertEqual(10.0*constants.CO2_EMISSIONS_WIND, co2)
self.wec.load_schedule("ref")
co2 = calculate_co2(self.wec, co2_emissions=co2_em)
self.assertEqual(8.0*constants.CO2_EMISSIONS_WIND, co2)
return
def test_objective(self):
model = pyomo.ConcreteModel()
self.wec.populate_model(model)
self.wec.get_objective()
return
def get_env(op_horizon, mpc_horizon=None, mpc_step_width=1):
ti = Timer(op_horizon=op_horizon,
mpc_horizon=mpc_horizon,
mpc_step_width=mpc_step_width)
we = Weather(ti)
pr = Prices(ti)
return Environment(ti, we, pr)
def assert_equal_array(a: np.ndarray, expected):
if not np.allclose(a, expected):
        expected = np.array(expected)
        raise AssertionError("Arrays are not equal:\n{}\n{}".format(a, expected))
#!/usr/bin/env python
from __future__ import absolute_import
import unittest
import numpy as np
import os
import tempfile
import training_data
class TestTrainingData(unittest.TestCase):
def test_add(self):
td = training_data.training_data()
self.assertTrue(np.array_equal(td.get_x(), np.empty([0, 4, 4], dtype=np.int)))
self.assertTrue(np.array_equal(td.get_y_digit(), np.empty([0, 1], dtype=np.int)))
self.assertTrue(np.allclose(td.get_reward(), np.empty([0, 1], dtype=np.float)))
self.assertTrue(np.array_equal(td.get_next_x(), np.empty([0, 4, 4], dtype=np.int)))
self.assertTrue(np.array_equal(td.get_done(), np.empty([0, 1], dtype=np.bool)))
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]), True)
self.assertTrue(np.array_equal(td.get_x(), np.ones([1, 4, 4], dtype=np.int)))
self.assertTrue(np.array_equal(td.get_y_digit(), np.array([[1]], dtype=np.int)))
self.assertTrue(np.allclose(td.get_reward(), np.array([[4]], dtype=np.float)))
        self.assertTrue(np.array_equal(td.get_next_x(), np.zeros([1, 4, 4], dtype=np.int)))
import logging
from typing import Optional
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from numpy.linalg import LinAlgError
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class L1QR:
def __init__(self, y: pd.Series, x: pd.DataFrame, alpha: float) -> None:
"""Python implementation of the L1 norm QR algorithm of
        Li and Zhu (2008): L1-Norm Quantile Regression, http://dx.doi.org/10.1198/106186008X289155
Args:
y: Vector of response data
x: Matrix of covariates
alpha: Quantile of interest
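        Example (illustrative sketch only; ``x_df`` and ``y_ser`` are assumed
        to be a pandas DataFrame of covariates and a pandas Series of
        responses):
            model = L1QR(y_ser, x_df, alpha=0.5)
            model.fit(s_max=10.0)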
"""
self.x = x.to_numpy()
self.y = y.to_numpy()
self.var_names = x.columns
self.alpha = alpha
# set by fit()
self.beta0: Optional[np.array] = None
self.beta: Optional[np.array] = None
self.s: Optional[np.array] = None
self.b0: Optional[pd.Series] = None
self.b: Optional[pd.DataFrame] = None
def fit(self, s_max: float = np.inf) -> None:
"""Estimate the model.
Args:
s_max: Stop the algorithm prematurely when the L1 norm of the slope coefficients reaches s_max
"""
n, k = self.x.shape
if self.y.size != n:
raise Exception('y and x have different number of rows!')
logger.info(f'Initialization lasso quantile regression for n={n}, k={k}, and alpha={self.alpha}')
xc = np.hstack((np.ones((n, 1)), self.x)) # Store x a second time with intercept
eps1 = 10 ** -10 # Some low value
eps2 = 10 ** -10 # Convergence criterion
max_steps = n * np.min((k, n - 1)) # Maximum number of steps for the algorithm
ind_n = np.arange(n) # Index of the observations
ind_k = np.arange(k) # Index of the variables
beta0 = np.zeros((max_steps + 1, 1)) # Stores the estimates of the constant term
beta = np.zeros((max_steps + 1, k)) # Stores the estimates of the slope parameters
s = np.zeros(max_steps + 1) # Stores the penalty parameter
        y_has_duplicate_values = np.unique(self.y).size != n
        if y_has_duplicate_values:
logger.info('Adding noise to y because y contains duplicate values')
self.y += np.random.normal(loc=0, scale=10 ** -5, size=self.y.size)
logger.info('Finding initial solution')
# There are actually two cases, first if n*tau is integer, second if tau*n is non-integer.
# Here I assume that in the latter case all the weight is on the first component (see section 2.2)
ini_beta0 = np.sort(self.y)[int(np.floor(self.alpha * n))] # Initial beta0 (see 2.2.1)
ini_beta = np.zeros(k) # Initial beta (see 2.2.1)
ind_e = np.array(int(np.argwhere(self.y == ini_beta0))) # Index of the first point in the elbow
ind_l = ind_n[self.y < self.y[ind_e]] # All points that are left of the elbow
ind_r = ind_n[self.y > self.y[ind_e]] # All points that are right of the elbow
residual = self.y - ini_beta0 # Initial residuals
# Add the first variable to the active set
inactive = ind_k # All variables not in V
tmp_e, tmp_l, tmp_r = ind_e, ind_l, ind_r # Create a copy of the index sets
lambda_var = np.zeros((2, inactive.size)) # First row: sign=1, second row: sign=-1
lambda_var[lambda_var == 0] = -np.inf # Initially set to -inf (want to maximize lambda)
b = np.array([0, 1]) # The 1_0 vector (see p. 171 bottom)
nu_var = np.zeros((2, inactive.size, b.size)) # 3d array: nu for sign=1 in first dimension, sign=-1 in second
for j_idx, j_star in enumerate(inactive):
x_v = xc[:, np.append(0, j_star + 1)]
# Sign of the next variable to include may be either positive or negative
for sign in (1, -1):
index = np.where(sign == 1, 0, 1) # Index in nu_var and lambda_var
# Combination of (2.10) and (2.11)
x0 = np.vstack((np.hstack((1, np.mat(self.x)[tmp_e, j_star])), np.hstack((0, sign))))
try: # Check if x0 has full rank
nu_tmp = np.linalg.solve(x0, b) # Solve system (p. 171 bottom)
nu_var[index, j_idx, :] = nu_tmp
# Store sets that are used to compute -lambda* (p. 172)
x_l = x_v.take(tmp_l, axis=0, mode='clip')
x_r = x_v.take(tmp_r, axis=0, mode='clip')
# Save lambda achieved by the current variable. If sign of last entry != sign then leave at -inf.
if np.sign(nu_tmp[-1]) == sign:
lambda_var[index, j_idx] = -((1 - self.alpha) * np.dot(x_l, nu_tmp).sum() -
self.alpha * np.dot(x_r, nu_tmp).sum())
except LinAlgError:
logger.debug(f'sign: {sign}')
# Select the nu corresponding to the maximum lambda and store the maximum lambda
nu_var = nu_var[lambda_var.argmax(axis=0), np.arange(inactive.size), :]
lambda_var = lambda_var.max(axis=0)
# Store the active variable
ind_v = inactive[lambda_var.argmax()]
# Store initial nu0 and nu
nu0 = nu_var[ind_v, 0]
nu = nu_var[ind_v, 1:]
beta0[0] = ini_beta0
beta[0] = ini_beta
logger.debug(f'Initial beta0: {ini_beta0}')
logger.debug(f'Initial beta: {ini_beta}')
# Main loop
logger.info('Entering main loop')
drop = False
idx = 0
while idx < max_steps:
logger.debug(f'Index: {idx}')
idx += 1
# Calculate how far we need to move (the minimum distance between points and elbow)
if np.atleast_1d(nu).size == 1: # Make sure scalar array is converted to float, causes problems with np.dot
nu = np.float(nu)
# (2.14), nu0 + x'*nu where x is without i in elbow
gam = nu0 + np.dot(self.x.take(ind_n[np.in1d(ind_n, ind_e, invert=True)], axis=0).take(ind_v, axis=1), nu)
gam = np.ravel(gam) # Flatten the array
delta1 = np.delete(residual, ind_e, 0) / gam # This is s - s_l in (2.14)
# Check whether all points are in the elbow or if we still need to move on
if np.sum(delta1 <= eps2) == delta1.size:
delta = np.inf
else:
delta = delta1[delta1 > eps1].min()
# Test if we need to remove some variable j from the active set
if idx > 1:
delta2 = np.array(-beta[idx - 1, ind_v] / nu)
if np.sum(delta2 <= eps2) == delta2.size:
tmpz_remove = np.inf
else:
tmpz_remove = delta2[delta2 > eps1].min()
if tmpz_remove < delta:
drop = True
delta = tmpz_remove
else:
drop = False
# Check if we need to continue or if we are done
if delta == np.inf:
logger.info(f'Finished, delta = inf')
break
# Update the shrinkage parameter
s[idx] = s[idx - 1] + delta
# Prepare the next steps depending if we drop a variable or not
if drop:
tmp_delta = delta2[delta2 > eps1] # All deltas larger than eps2
tmp_ind = ind_v[delta2 > eps1] # All V larger than eps2
j1 = tmp_ind[tmp_delta.argmin()] # The index of the variable to kick out
else:
# Find the i that will hit the elbow next
tmp_ind = np.delete(ind_n, ind_e)[
delta1 > eps2] # Remove Elbow from observations and keep non-zero elements
tmp_delta = delta1[delta1 > eps2] # All deltas that are non-zero
i_star = tmp_ind[tmp_delta.argmin()]
# Update beta
beta0[idx] = beta0[idx - 1] + delta * nu0
beta[idx] = beta[idx - 1]
beta[idx, ind_v] = beta[idx - 1, ind_v] + delta * nu
if s[idx] > s_max:
logger.info(f's = {s[idx]:.2f} is large enough')
break
# Reduce residuals not in the elbow by delta*gam
residual[np.in1d(ind_n, ind_e, invert=True)] -= delta * gam
# Check if there are points in either L or R if we do not drop
if (ind_l.size + ind_r.size == 1) & (not drop):
logger.info('No point in L or R')
break
# Add a variable to the active set
# Test if all variables are included. If yes, set lambda_var to -inf and continue with next step
if ind_v.size == k:
lambda_var = np.zeros((2, inactive.size))
lambda_var[lambda_var == 0] = -np.inf
else:
inactive = ind_k[np.in1d(ind_k, ind_v, invert=True)] # All variables not in V
tmp_e, tmp_l, tmp_r = ind_e, ind_l, ind_r # Create a copy of the index sets
if drop:
ind_v = ind_v[ind_v != j1] # Remove the detected variable from V
else:
# Add i_star to the Elbow and remove it from either Left or Right
# (we know that i_star hits the elbow)
tmp_e = np.append(tmp_e, i_star)
tmp_l = tmp_l[tmp_l != i_star]
tmp_r = tmp_r[tmp_r != i_star]
lambda_var = np.zeros((2, inactive.size)) # First row: sign=1, second row: sign=-1
lambda_var[lambda_var == 0] = -np.inf # Initially set to -inf (want to maximize lambda)
nu_var = np.zeros((2, inactive.size, 1 + ind_v.size + 1)) # Store nus in 3d array
b = np.array([0] * (ind_v.size + 1) + [1]) # The 1_0 vector (see p. 171 bottom)
for j_idx in range(inactive.size):
j_star = inactive[j_idx] # Select variable j as candidate for the next active variable
# Select all columns of x that are in ind_v and additionally j_star.
# Transposition improves performance as Python stores array in row-major order
x_v = xc.T.take(np.append(0, np.append(ind_v, j_star) + 1), axis=0, mode='clip').T
# Combination of (2.10) and (2.11)
x0 = np.vstack((np.hstack((np.ones((tmp_e.size, 1)),
self.x[tmp_e][:, ind_v].reshape((tmp_e.size, -1)),
self.x[tmp_e, j_star].reshape((tmp_e.size, -1)))),
np.hstack(
(0, np.sign(beta[idx, ind_v]), np.nan)))) # nan is a placeholder for sign
# Sign of the next variable to include may be either positive or negative
for sign in (1, -1):
index = np.where(sign == 1, 0, 1) # Index in nu_var and lambda_var
x0[-1, -1] = sign # Change sign in the x0 matrix
try:
nu_tmp = np.linalg.solve(x0, b) # Solve system (p. 171 bottom)
# If sign of last entry != sign then leave at -inf.
                            if np.sign(nu_tmp[-1]) == sign:
#!/usr/bin/env python
# coding: utf-8
# ## Pendulum simulation
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.display import HTML
import pydae.svg_tools as svgt
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
plt.ion()
# In[2]:
from IPython.lib.display import YouTubeVideo
YouTubeVideo('4a0FbQdH3dY')
# Before simulating we need to import the class from the build module:
# In[3]:
from pydae import ssa
from pendulum import pendulum_class
# Then you can create an instance of the class:
# In[4]:
pend = pendulum_class()
# ### Initialization
# The `initialize` method gives the steady state of the system by solving first the backward and then the forward problems.
# In this case
# In[5]:
M = 30.0 # mass of the bob (kg)
L = 5.21 # length of the pendulum (m)
pend.initialize([{
'M':M,'L':L, # parameters setting
'theta':np.deg2rad(0) # initial desired angle = 0º
                  }],-1)                    # here -1 means that -1 is considered as the initial guess for
# dynamic and algebraic states
# Once the system is initialized we can show the obtained variable values:
# In[6]:
pend.report_x() # obtained dynamic states
pend.report_y() # obtained algebraic states
pend.report_z() # obtained outputs
pend.report_u()      # considered inputs (f_x is both input and output)
pend.report_params() # considered parameters
# Another option to get values is to use the `get_value` method. Let's use it and then calculate the oscillation period of the pendulum as:
#
# $$
# T = 2\pi \sqrt{\frac{L}{G}}
# $$
# In[7]:
G = pend.get_value('G')
L = pend.get_value('L')
T = 2*np.pi*np.sqrt(L/G)
print(f'Oscillation period with formula: T = {T:0.2f} s')
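# Rough check: with L = 5.21 m and assuming G ≈ 9.81 m/s², the formula gives
# T = 2*pi*sqrt(5.21/9.81) ≈ 4.58 s, which should match the value printed above.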
# ### Small signal analysis
# After the system is initialized we can perform a small signal analysis. The `pydae.ssa` module has a method called `eval_A` to compute the matrix $A$ for the system:
#
# $$
# \Delta \mathbf{\dot {x}} = \mathbf{A}\Delta \mathbf{x}
# $$
# In[8]:
ssa.eval_A(pend)    # method to linearize the system and to compute matrix A
eig_df=ssa.damp_report(pend) # method to create a pandas.DataFrame after computing eigenvalues for A
eig_df
# Using modal analysis, the period of the pendulum can be computed from **Mode 1**:
# In[9]:
freq = eig_df['Freq.']['Mode 1']   # frequency of Mode 1 (Hz)
period = 1/freq # oscillation period
print(f'Oscillation period from small signal analysis: T = {period:0.2f} s')
# ### Simulation
#
# A time simulation can be performed using the method `simulate`:
#
# In[10]:
pend.simulate([{'t_end':1, 'theta':np.deg2rad(-5)},   # initialize the system with theta = -5º and run until t=1s
{'t_end':50,'f_x':0.0}], # release the pendulum by making the f_x force equal zero
'prev'); # here the initialization is using the previous computed
# steady state
# In[11]:
time = pend.T[:,0] # gets the simulated times
theta = np.rad2deg(pend.get_values('theta'))   # gets the values for theta (and convert them from rad to deg)
# this is just to find the period of the theta oscillation:
idx_1 = np.where(theta==np.max(theta[(time>7)&(time<11)]))[0][0]
idx_2 = np.where(theta==np.max(theta[(time>10)&(time<14)]))[0][0]
t_1 = time[idx_1]
t_2 = time[idx_2]
period_sim = t_2 - t_1
# plotting the results with matplotlib:
plt.close('all')
fig, axes = plt.subplots(nrows=1,ncols=1, figsize=(6, 3), dpi=100)
axes.plot(pend.T, np.rad2deg(pend.get_values('theta')), label=r'$\theta$')
axes.plot(t_1, np.rad2deg(pend.get_values('theta')[idx_1]),'o',ms=4)
axes.plot(t_2, np.rad2deg(pend.get_values('theta')[idx_2]),'o',ms=4)
axes.grid()
axes.set_ylabel('$\\theta (º)$')
axes.set_xlabel('Time (s)')
fig.tight_layout()
print(f'Oscillation period from simulation: T = {period_sim:0.2f} s')
# #### Animation of the results
#
# The obtained results can be animated.
# In[12]:
from importlib import reload
reload(svgt)
anim = svgt.animatesvg('../svg/pendulum_1_fx.svg','pendulum')
anim.set_size(600,400)
# start button:
anim.begin_click = True
anim.group_id = 'play'
anim.anim_id = 'play_1'
anim.translate(np.array([0.,3.]),np.array([0.0,0.0]),np.array([0.0,0.0]))
# pendulum:
anim.group_id = 'pendulum'
anim.begin_click = False
anim.anim_id = ''
anim.begin = 'play_1.begin'
times = pend.T[:,0]
anim.rotate(times,-np.rad2deg(pend.get_values('theta')),73.327,31.538)
# force:
x = pend.get_values('p_x')*10
y = -10*(5.21+pend.get_values('p_y'))
f_x = -pend.get_values('f_x')*0.05
anim.group_id = 'f_x_pos'
anim.begin = 'play_1.begin'
s_x = -np.copy(f_x)
s_x[s_x<0] = 0.0
s_y = s_x
anim.scale(pend.T[:,0],72.019669,83.537544,s_x,s_y)
anim.translate(pend.T[:,0],x,y)
anim.group_id = 'f_x_neg'
anim.begin = 'play_1.begin'
s_x = np.copy(f_x)
s_x[s_x<0] = 0.0
s_y = s_x
anim.scale(pend.T[:,0],74.635086,83.537544,s_x,s_y)
anim.translate(pend.T[:,0],x,y)
anim.save('pendulum_5deg.svg')
HTML('pendulum_5deg.svg')
# Let's suppose that we want to show that the period depends very little on the amplitude (for small amplitudes). We can simulate twice and then compare the results:
#
# - `p_5`: system for initial position of 5º
# - `p_10`: system for initial position of 10º
# In[13]:
p_5 = pendulum_class()
p_10 = pendulum_class()
M = 30.0
L = 5.21
p_5.initialize([{'f_x':0,'M':M,'L':L,'theta':np.deg2rad(0)}],-1)
p_5.simulate([{'t_end':1, 'theta':np.deg2rad(-5)},
{'t_end':50,'f_x':0}],'prev');
p_10.initialize([{'f_x':0,'M':M,'L':L,'theta':np.deg2rad(0)}],-1)
p_10.simulate([{'t_end':1, 'theta':np.deg2rad(-10)},
{'t_end':50,'f_x':0}],'prev');
# In[14]:
anim = svgt.animatesvg('../svg/pendulum_2.svg','pendulum_1')
anim.set_size(600,400)
# start button:
anim.begin_click = True
anim.group_id = 'play'
anim.anim_id = 'play_anim_2'
anim.scale(np.array([0,1.0]),np.array([1.,1.]),np.array([1.,1.]),np.array([1.,1.]),np.array([1.,1.0]))
anim.anim_id = ''
# pendulums:
anim.group_id = 'pendulum_1'
anim.begin_click = False
anim.begin = 'play_anim_2.begin'
anim.rotate(p_5.T[:,0],-np.rad2deg(p_5.get_values('theta')),73.327,31.538)
anim.group_id = 'pendulum_2'
anim.rotate(p_10.T[:,0],-np.rad2deg(p_10.get_values('theta')),73.327,31.538)
anim.save('pendulum_5deg_10deg.svg')
HTML('pendulum_5deg_10deg.svg')
# Now we can check that mass does not affect the oscillation period:
# In[15]:
p_m30 = pendulum_class()
p_m105 = pendulum_class()
p_m30.initialize([{'f_x':0,'M':30,'L':L,'theta':np.deg2rad(0)}],-1)
p_m105.initialize([{'f_x':0,'M':30+75,'L':L,'theta':np.deg2rad(0)}],-0.1)
p_m105.report_x()
p_m105.report_y()
ssa.eval_ss(p_m30)
ssa.eval_ss(p_m105)
eig_df_m30 = ssa.damp_report(p_m30)
eig_df_m105 = ssa.damp_report(p_m105)
# In[16]:
freq_m30 = eig_df_m30['Freq.']['Mode 1'] # frequency of Mode 1 (Hz)
freq_m105 = eig_df_m105['Freq.']['Mode 1'] # frequency of Mode 1 (Hz)
print(f'Oscillation period from small signal analysis with M = 30 kg: T = {1/freq_m30:0.2f} s')
print(f'Oscillation period from small signal analysis with M = 105 kg: T = {1/freq_m105:0.2f} s')
# In[17]:
pend_push = pendulum_class()
pend_push.initialize([{'f_x':0,'M':30,'L':L,'theta':np.deg2rad(-10)}],-1)
pend_push.simulate([{'t_end':1, 'theta':np.deg2rad(-10)},   # initialize the system with theta = -10º and run until t=1s
                    {'t_end':1.5,'f_x':20.0},   # push the pendulum by applying a positive force f_x = 20 until t=1.5s
{'t_end':10,'f_x':0.0}], # release the pendulum
'prev');
# In[18]:
from importlib import reload
reload(svgt)
anim = svgt.animatesvg('../svg/pendulum_1_fx.svg','pendulum')
anim.set_size(600,400)
# start button:
anim.begin_click = True
anim.group_id = 'play'
anim.anim_id = 'play_anim_3'
anim.translate(np.array([0.,3.]),np.array([0.0,0.1]),np.array([0.0,0.1]))
anim.anim_id = ''
# pendulum:
anim.group_id = 'pendulum'
anim.begin_click = False
anim.anim_id = 'pendulum_anim'
anim.begin = 'play_anim_3.begin'
times = pend_push.T[:,0]
anim.rotate(times,-np.rad2deg(pend_push.get_values('theta')),73.327,31.538)
# force:
x = pend_push.get_values('p_x')*10
y = -10*(5.21+pend_push.get_values('p_y'))
f_x = -pend_push.get_values('f_x')*0.05
anim.group_id = 'f_x_pos'
anim.begin = 'play_anim_3.begin'
#anim.begin = 'click'
s_x = -np.copy(f_x)
s_x[s_x<0] = 0.0
s_y = s_x
anim.scale(pend_push.T[:,0],72.019669,83.537544,s_x,s_y)
anim.translate(pend_push.T[:,0],x,y)
anim.group_id = 'f_x_neg'
anim.begin = 'play_anim_3.begin'
#anim.begin = 'click'
s_x = np.copy(f_x)
s_x[s_x<0] = 0.0
s_y = s_x
anim.scale(pend_push.T[:,0],74.635086,83.537544,s_x,s_y)
anim.translate(pend_push.T[:,0],x,y)
anim.save('pendulum_10deg_push.svg')
HTML('pendulum_10deg_push.svg')
# ### Control
# #### Simulation for control testing
# In[19]:
Δt = 0.1
p_ctrl = pendulum_class()
times = np.arange(0,30,Δt)
p_ctrl.initialize([{'M':30,'L':5.21,'theta':np.deg2rad(-10)}],-1)
f_x_0 = p_ctrl.get_value('f_x')
for t in times:
f_x = f_x_0
if t>2.0:
f_x = 0.0
p_ctrl.run([{'t_end':t,'f_x':f_x}])
p_ctrl.post();
# In[20]:
# plotting the results with matplotlib:
plt.close('all')
fig, axes = plt.subplots(nrows=2,ncols=1, figsize=(6, 5), dpi=100)
axes[0].plot(p_ctrl.T, np.rad2deg(p_ctrl.get_values('theta')), label=r'$\theta$')
E_k = p_ctrl.get_values('E_k')
E_p = p_ctrl.get_values('E_p')
axes[1].plot(p_ctrl.T, E_k, label=f'$E_k$')
axes[1].plot(p_ctrl.T, E_p, label=f'$E_p$')
axes[1].plot(p_ctrl.T, E_k+E_p, label=f'$E$')
axes[0].grid()
axes[1].grid()
axes[1].legend()
axes[0].set_ylabel('Angle $\\theta\; (º)$')
axes[1].set_ylabel('Energy (J)')
axes[1].set_xlabel('Time (s)')
fig.tight_layout()
# #### Basic proportional regulator
# Now we are going to propose a control law as follows:
#
# $$
# \Delta f_x = - K v_x
# $$
#
# $$
# f_x = f_h + \Delta f_x
# $$
#
# where $f_h$ is the hand force and $\Delta f_x$ is the controller force increment.
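# This feedback term behaves like viscous damping: the controller force always opposes the
# horizontal velocity of the bob, so the oscillation energy should decay over time (the energy
# plot below illustrates this). The gain $K = 20$ used in the next cell is simply a tuning choice.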
# In[21]:
Δt = 0.1
p_ctrl = pendulum_class()
times = np.arange(0,25,Δt)
p_ctrl.initialize([{'M':30,'L':5.21,'theta':np.deg2rad(-10)}],-1)
K = 20.0
f_x_0 = p_ctrl.get_value('f_x')
for t in times:
f_x_hand = f_x_0
if t>2.0:
f_x_hand = 0.0
v_x = p_ctrl.get_value('v_x') # speed in x measurement
f_x = f_x_hand - K*v_x # control law
p_ctrl.run([{'t_end':t,'f_x':f_x}]) # simulation until t(k+1) = Δt + t(k) with the updated f_x force
p_ctrl.post(); # required post processing
# In[22]:
# plotting the results with matplotlib:
plt.close('all')
fig, axes = plt.subplots(nrows=2,ncols=1, figsize=(6, 5), dpi=100)
axes[0].plot(p_ctrl.T, np.rad2deg(p_ctrl.get_values('theta')), label=r'$\theta$')
E_k = p_ctrl.get_values('E_k')
E_p = p_ctrl.get_values('E_p')
axes[1].plot(p_ctrl.T, E_k, label=f'$E_k$')
axes[1].plot(p_ctrl.T, E_p, label=f'$E_p$')
axes[1].plot(p_ctrl.T, E_k+E_p, label=f'$E$')
axes[0].grid()
axes[1].grid()
axes[1].legend()
axes[0].set_ylabel('Angle $\\theta\; (º)$')
axes[1].set_ylabel('Energy (J)')
axes[1].set_xlabel('Time (s)')
fig.tight_layout()
# In[23]:
anim = svgt.animatesvg('../svg/pendulum_1_fx.svg','pendulum')
anim.set_size(600,400)
# start button:
anim.begin_click = True
anim.group_id = 'play'
anim.anim_id = 'play_anim_4'
anim.scale(np.array([0, 10000.0]),
import argparse
import functools
import numpy as np
import os.path
import scipy.linalg as sla
import sys
import datetime
import os
import psutil
from pyspark import SparkContext, SparkConf
from pyspark.mllib.linalg import SparseVector
###################################
# Utility functions
###################################
def select_topr(vct_input, r):
"""
Returns the R-th greatest elements indices
in input vector and store them in idxs_n.
"""
temp = np.argpartition(-vct_input, r)
idxs_n = temp[:r]
return idxs_n
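# Illustrative example (hypothetical values, not part of the pipeline):
# select_topr(np.array([0.1, 0.9, 0.5, 0.7]), 2) returns the indices of the two
# largest entries, i.e. {1, 3}, in no particular order (np.argpartition does not sort).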
def input_to_rowmatrix(raw_rdd, norm):
"""
Utility function for reading the matrix data
"""
# Parse each line of the input into a numpy array of floats. This requires
# several steps.
# 1: Split each string into a list of strings.
# 2: Convert each string to a float.
# 3: Convert each list to a numpy array.
p_and_n = functools.partial(parse_and_normalize, norm = norm)
numpy_rdd = raw_rdd \
.zipWithIndex() \
.map(lambda x: (x[1], p_and_n(x[0])))
return numpy_rdd
###################################
# Spark helper functions
###################################
def parse_and_normalize(line, norm):
"""
    Utility function. Parses a line of text into a floating point array and,
    if norm is set, centers it (zero mean) and scales it to unit norm.
"""
x = np.array([float(c) for c in line.strip().split()])
if norm:
x -= x.mean() # 0-mean.
x /= sla.norm(x) # Unit norm.
return x
def vector_matrix(row):
"""
Applies u * S by row-wise multiplication, followed by a reduction on
each column into a single vector.
"""
row_index, vector = row # Split up the [key, value] pair.
u = _U_.value # Extract the broadcasted vector "u".
# This means we're in the first iteration and we just want a random
# vector. To ensure all the workers generate the same random vector,
# we have to seed the RNG identically.
if type(u) == tuple:
T, seed = u
np.random.seed(seed)
u = np.random.random(T)
u -= u.mean()
u /= sla.norm(u)
u = u[row_index]
# Generate a list of [key, value] output pairs, one for each nonzero
# element of vector.
out = []
for i in range(vector.shape[0]):
out.append([i, u * vector[i]])
return out
def matrix_vector(row):
"""
Applies S * v by row-wise multiplication. No reduction needed, as all the
summations are performed within this very function.
"""
k, row = row # Extract the broadcast variables.
v = _V_.value
# Perform the multiplication using the specified indices in both arrays.
    innerprod = np.dot(row[v.indices], v.values)
"""Tools for running mdoel simulations"""
import logging
import numpy as np
import sympy as sp
from scipy.optimize import broyden1
from .exceptions import ModelException
try:
from scikits.odes.dae import dae
except ImportError:
print("Warning - sciket.odes not found. Simulations are disabled.")
logger = logging.getLogger(__name__)
def _fetch_ic(x0, dx0, system, func, t0, eps=0.001):
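    """Coerce the user-supplied initial conditions into numpy arrays.

    If the supplied (or default zero) derivatives are inconsistent with the
    residual function ``func`` at ``t0``, a consistent ``dx0`` is searched for
    with a Broyden solver, and a ModelException is raised if none can be found.
    """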
if isinstance(x0, list):
assert len(x0) == len(system.state_vars)
X0 = np.array(x0, dtype=np.float64)
elif isinstance(x0, dict):
X0 = np.array(
[np.NaN for _ in system.state_vars], dtype=np.float64
)
for k, v in x0.items():
_, idx = str(k).split('_')
idx = int(idx)
X0[idx] = v
elif isinstance(x0, (int, float, complex)) and len(system.state_vars) == 1:
X0 = np.array([x0], dtype=np.float64)
elif isinstance(x0, np.ndarray) and x0.shape == (len(system.state_vars), ):
X0 = x0
else:
raise ModelException(f"Invalid Initial Conditions: {x0}")
if dx0:
DX0 = np.array(dx0, dtype=np.float64)
else:
DX0 = np.zeros(X0.shape, dtype=np.float64)
# if we don't have consistent initial conditions; find them if we can
# fail if we can't
def f(y):
res = np.empty_like(X0)
func(t0, X0, y, res)
return res
if np.linalg.norm(f(DX0)) > eps:
DX0 = broyden1(f, DX0)
        if np.linalg.norm(f(DX0)) > eps:
raise ModelException(
f"Inconsistent initial conditions: "
f"Could not find dx0 for the given x0 {x0}")
return X0, DX0
def simulate(system,
timespan,
x0,
dx0=None,
dt=0.1,
control_vars=None):
"""Simulate the system dynamics.
This method integrates the dynamics of the system over the specified
interval of time, starting at the specified initial state.
The solver used is a differential-algebraic integrator which respects
conservation laws and algebraic constraints. It is expected that the
    initial state satisfies the system's inherent algebraic constraints;
inconsistent initial conditions will raise exceptions.
The initial values of derivatives can be specified and the solver will
ensure they are consistent with the initial state, or change them if they
are not.
    Currently, control variables can take the form of numbers or strings
and are assigned via a dictionary or list.
Permissible strings:
* numerical constants such as `1.0`, `pi`
* time `t`
* state variables; for example `x_0`
* arithmetic operators such as `+`,`-`, `*`, `/`, as well as `^`
(power operator), `%` (remainder)
* elementary math functions such as `sin`, `exp`, `log`
* ternary if; for example `t < 0 ? 0 : 1` which implements the Heaviside
Args:
system :obj:`BondGraph`: The system to simulate
timespan: A pair (`list` or `tuple`) containing the start and end points
of the simulation.
x0: The initial conditions of the system.
dx0 (Optional): The initial rates of change of the system. The default
value (`None`) indicates that the system should be
initialised from the state variable initial conditions.
dt: The time step between reported (not integrated) values.
control_vars: A `dict`, `list` or `tuple` specifing the values of the
control variables.
Returns:
t: numpy array of timesteps
x: numpy array of state values
Raises:
ModelException, SolverException
"""
if system.ports:
raise ModelException(
"Cannot Simulate %s: unconnected ports %s",
system, system.ports)
if system.control_vars and not control_vars:
raise ModelException("Control variable not specified")
samples = int((timespan[1]-timespan[0]) / dt) + 1
t = np.linspace(*timespan, samples)
res, X = _bondgraph_to_residuals(system, control_vars)
X0, DX0 = _fetch_ic(x0, dx0, system, res, t[0])
solver_name = 'ida'
dae_solver = dae(solver_name, res)
sol = dae_solver.solve(t, X0, DX0)
    return t.reshape((samples, 1)), sol.values.y
def _to_function(string, X, DX, substitutions):
f = sp.sympify(string).subs(substitutions)
f_n = sp.lambdify((sp.S('t'), X, DX), f, "numpy")
return f_n
def _bondgraph_to_residuals(model, control_vars=None):
dX = sp.IndexedBase('dX')
X = sp.IndexedBase('X')
U = sp.IndexedBase('U')
x_subs = []
dx_subs = []
u_subs = []
u_func = []
n = len(model.state_vars)
m = 0
for i, x in enumerate(model.state_vars):
x_subs.append((x, X[i]))
dx_subs.append((sp.S(f'dx_{i}'), dX[i]))
if len(model.control_vars) > 0:
u_func_dict = {}
u_constants = {}
if isinstance(control_vars, list):
u_func_dict.update({
i: f for i, f in enumerate(control_vars)}
)
elif isinstance(control_vars, dict):
u_func_dict.update({
int(v[2:]): f for v, f in control_vars.items()
})
elif len(model.control_vars) == 1:
u_func_dict[0] = control_vars
else:
raise TypeError(f"Control argument {control_vars} is invalid")
test_x = np.zeros(shape=(n,), dtype=np.float32)
for idx, f in u_func_dict.items():
try:
if isinstance(f, (float, int, sp.Number)):
u_constants[idx] = f
continue
if isinstance(f, str):
f = _to_function(f, X, dX, dx_subs + x_subs)
u_func_dict[idx] = f
if n == 1:
r = f(0, 0, 0)
else:
r = f(0, test_x, test_x)
assert isinstance(r, (float, int, sp.Number)
), "Invalid output from control"
except Exception as ex:
message = f"Invalid control function for var: u_{idx}.\n " \
"Control functions should be of the form:\n" \
f"u_{idx} = f(t, x, dx/dt)"
raise ModelException(message)
for i, u in enumerate(model.control_vars):
if i in u_constants:
u_subs.append((u, u_constants[i]))
continue
u_subs.append((u, U[m]))
try:
u_func.append(u_func_dict[i])
except KeyError:
raise ModelException(f"Control variable {u} must be specified")
m += 1
rels = [r.subs(dx_subs).subs(x_subs).subs(u_subs)
for r in model.constitutive_relations]
if len(rels) != n:
raise ModelException(
"Model simplification error: system is under-determined")
Fsym = sp.symarray('F', shape=n)
for i, r in enumerate(rels):
Fsym[i] = r
t = sp.S('t')
    _r = np.empty(shape=(n,), dtype=np.float64)
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.mplot3d import axes3d # necessary !!
from pyqchem.order_states import get_order_states_list, correct_order_list
import argparse
# Argument parser
parser = argparse.ArgumentParser(description='Plot 3D data')
parser.add_argument('filename', metavar='filename', type=str,
help='filename for input')
parser.add_argument('--output_folder', metavar='folder', type=str, default='3d_plot/',
help='folder to store PDF plots')
parser.add_argument('--show_plots', action='store_true',
help='show plots while running')
parser.add_argument('--full', action='store_true',
help='apply periodicity (centered at 0,0)')
parser.add_argument('--interpolate', action='store_true',
help='interpolate missing points')
args = parser.parse_args()
def interpolate_data(points, data , y_range, z_range):
from scipy.interpolate import griddata
Y, Z = np.meshgrid(y_range, z_range)
grid_z2 = griddata(points, data, (Y, Z), method='cubic')
return grid_z2.T.flatten()
def triplot(data1, data2, label1, label2, y_range, z_range, wireframe=False, pdf=None, clabels=False,
zlabel='Energy [eV]', zlevels=np.arange(-1.5, 1.5 + 0.025, 0.025), show_plot=True):
# from matplotlib.colors import LinearSegmentedColormap
# from matplotlib.colors import BoundaryNorm
# from matplotlib.ticker import MaxNLocator
cmap = plt.get_cmap('PiYG')
Y, Z = np.meshgrid(z_range, y_range)
plt.figure(1)
plt.title(label1)
CS = plt.contourf(Y, Z, np.array(data1).reshape(len(y_range), len(z_range)), levels=zlevels, cmap=cmap)
CS2 = plt.contour(CS, levels=CS.levels[::1], colors='black')
if clabels:
plt.clabel(CS2, inline=1, fontsize=10)
plt.xlabel('X [Å]')
plt.ylabel('Y [Å]')
plt.xlim([-4, 4])
plt.ylim([-4, 4])
cbar = plt.figure(1).colorbar(CS)
cbar.ax.set_ylabel(zlabel)
if pdf is not None:
pdf.savefig()
if not show_plot:
plt.close()
plt.figure(2)
plt.title(label2)
CS = plt.contourf(Y, Z, np.array(data2).reshape(len(y_range), len(z_range)), levels=zlevels, cmap=cmap)
CS2 = plt.contour(CS, levels=CS.levels[::1], colors='black')
if clabels:
plt.clabel(CS2, inline=1, fontsize=10)
plt.xlabel('X [Å]')
plt.ylabel('Y [Å]')
plt.xlim([-4, 4])
plt.ylim([-4, 4])
cbar = plt.figure(2).colorbar(CS)
cbar.ax.set_ylabel(zlabel)
if pdf is not None:
pdf.savefig()
if not show_plot:
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if wireframe:
ax.plot_wireframe(Y, Z, np.array(data1).reshape(len(y_range), len(z_range)), color='b')
ax.plot_wireframe(Y, Z, np.array(data2).reshape(len(y_range), len(z_range)), color='r')
else:
ax.plot_surface(Y, Z, np.array(data1).reshape(len(y_range), len(z_range)), color='b')
ax.plot_surface(Y, Z, np.array(data2).reshape(len(y_range), len(z_range)), color='r')
fake2Dline = mpl.lines.Line2D([0], [0], linestyle="none", c='b', marker='o')
fake2Dline2 = mpl.lines.Line2D([0], [0], linestyle="none", c='r', marker='o')
ax.legend([fake2Dline, fake2Dline2], [label1, label2], numpoints=1)
ax.set_xlabel('X [Å]')
ax.set_ylabel('Y [Å]')
ax.set_zlabel(zlabel)
if pdf is not None:
pdf.savefig(fig)
if show_plot:
plt.show()
else:
plt.close()
def biplot_interpolated(data1, data2, label1, label2, y_range, z_range, pdf=None,
zlabel='Energy [eV]', zrange=(-1.5, 1.5), zp=0.025, show_plot=True):
from scipy import interpolate
Y, Z = np.meshgrid(z_range, y_range)
print('----------------')
print(Y.shape)
print(len(Y), len(data1))
#f1 = interpolate.interp2d(Y, Z, data1, kind='cubic')
f1 = interpolate.interp2d(Y, Z, data1, kind='linear')
f2 = interpolate.interp2d(Y, Z, data2, kind='linear')
x = np.arange(-4, 4, zp)
plt.figure(3)
plt.xlim([-4, 4])
plt.ylim([zrange[0], zrange[1]])
plt.xlabel('X [Å]')
plt.ylabel(zlabel)
plt.plot(x, f1(x, 0), label=label1)
plt.plot(x, f2(x, 0), label=label2)
plt.legend()
if pdf is not None:
pdf.savefig()
if show_plot:
plt.show()
else:
plt.close()
def biplot(data1, data2, label1, label2, y_range, z_range, pdf=None,
zlabel='Energy [eV]', zrange=(-1.5, 1.5), title=None, show_plot=True,
direction=0):
plt.figure(3)
if direction == 0:
data1 = np.array(data1).reshape([len(y_range), len(z_range)])[len(z_range)//2]
data2 = np.array(data2).reshape([len(y_range), len(z_range)])[len(z_range)//2]
plt.xlabel('X [Å]')
if direction == 1:
data1 = np.array(data1).reshape([len(y_range), len(z_range)])[:,len(y_range)//2]
data2 = np.array(data2).reshape([len(y_range), len(z_range)])[:,len(y_range)//2]
plt.xlabel('Y [Å]')
plt.title(title)
plt.xlim([-4, 4])
if zrange is not None:
plt.ylim([zrange[0], zrange[1]])
plt.ylabel(zlabel)
plt.plot(z_range, data1, label=label1)
plt.plot(z_range, data2, label=label2)
plt.legend()
if pdf is not None:
pdf.savefig()
if show_plot:
plt.show()
else:
plt.close()
def multibiplot(data, labels, y_range, z_range, pdf=None,
zlabel='Energy [eV]', zrange=(-1.5, 1.5), title=None, show_plot=True,
direction=0):
plt.figure(3)
if direction == 0:
for i, dat in enumerate(data):
data[i] = np.array(dat).reshape([len(y_range), len(z_range)])[len(z_range)//2]
#data[i] = np.array(dat).reshape([len(y_range), len(z_range)])[len(z_range)//2]
plt.xlabel('X [Å]')
if direction == 1:
for i, dat in enumerate(data):
data[i] = np.array(dat).reshape([len(y_range), len(z_range)])[:,len(y_range)//2]
# data2 = np.array(data2).reshape([len(y_range), len(z_range)])[:,len(y_range)//2]
plt.xlabel('Y [Å]')
plt.title(title)
plt.xlim([-4, 4])
if zrange is not None:
plt.ylim([zrange[0], zrange[1]])
plt.ylabel(zlabel)
for dat, l in zip(data, labels):
plt.plot(z_range, dat, label=l)
# plt.plot(z_range, data2, label=label2)
plt.legend()
if pdf is not None:
pdf.savefig()
if show_plot:
plt.show()
else:
plt.close()
def multibiplot_2axis(data, labels, data2, labels2, y_range, z_range, pdf=None,
zlabel='Energy [eV]', zlabel2=' ', zrange=(-1.5, 1.5), zrange2=(-1.5, 1.5),
title=None, show_plot=True, direction=0):
#plt.figure(3)
f, ax1 = plt.subplots()
ax2 = ax1.twinx()
if direction == 0:
ax1.set_xlabel('X [Å]')
for i, dat in enumerate(data):
data[i] = np.array(dat).reshape([len(y_range), len(z_range)])[len(z_range)//2]
for i, dat in enumerate(data2):
data2[i] = np.array(dat).reshape([len(y_range), len(z_range)])[len(z_range)//2]
if direction == 1:
ax1.set_xlabel('Y [Å]')
for i, dat in enumerate(data):
data[i] = np.array(dat).reshape([len(y_range), len(z_range)])[:,len(y_range)//2]
# data2 = np.array(data2).reshape([len(y_range), len(z_range)])[:,len(y_range)//2]
for i, dat in enumerate(data2):
data2[i] = np.array(dat).reshape([len(y_range), len(z_range)])[:,len(y_range)//2]
plt.title(title)
ax1.set_xlim([-4, 4])
if zrange is not None:
ax1.set_ylim([zrange[0], zrange[1]])
ax1.set_ylabel(zlabel)
if zrange2 is not None:
ax2.set_ylim([zrange[0], zrange[1]])
ax2.set_ylabel(zlabel2)
lns1 = []
for dat, l in zip(data, labels):
lns1.append(ax1.plot(z_range, dat, label=l))
# plt.plot(z_range, data2, label=label2)
lns2 = []
for dat, l in zip(data2, labels2):
lns2.append(ax2.plot(z_range, dat, label=l))
lns = [j for i in lns1 for j in i] + [j for i in lns2 for j in i]
labs = [l.get_label() for l in lns]
plt.legend(lns, labs, loc=0)
if pdf is not None:
pdf.savefig()
if show_plot:
plt.show()
else:
plt.close()
def v2_mayavi(data1, data2, label1, label2, y_range, z_range,):
transparency = False
from mayavi import mlab
fig = mlab.figure()
x = np.arange(-2, 2, 0.1)
y = np.arange(-2, 2, 0.1)
mx, my = np.meshgrid(x, y, indexing='ij')
mz1 = np.abs(mx) + np.abs(my)
mz2 = mx ** 2 + my ** 2
print(mx.shape)
print(my.shape)
print(mz1.shape)
my, mx = np.meshgrid(z_range, y_range)
mz1 = np.array(data1).reshape([len(y_range), len(z_range)])
mz2 = np.array(data2).reshape([len(y_range), len(z_range)])
print(my.shape)
print(mx.shape)
print(mz1.shape)
ax_ranges = [-4, 4, -4, 4, -0.4, 0.4]
ax_scale = [1.0, 1.0, 10]
ax_extent = ax_ranges * np.repeat(ax_scale, 2)
surf3 = mlab.surf(mx, my, mz1, colormap='Blues')
surf4 = mlab.surf(mx, my, mz2, colormap='Oranges')
surf3.actor.actor.scale = ax_scale
surf4.actor.actor.scale = ax_scale
mlab.view(60, 74, 17, [-2.5, -4.6, -0.3])
mlab.outline(surf3, color=(.7, .7, .7), extent=ax_extent)
mlab.axes(surf3, color=(.7, .7, .7), extent=ax_extent,
ranges=ax_ranges,
xlabel='x', ylabel='y', zlabel='z')
if transparency:
surf3.actor.property.opacity = 0.5
surf4.actor.property.opacity = 0.5
fig.scene.renderer.use_depth_peeling = 1
mlab.show()
#############################
folder = args.output_folder
#############################
with open(args.filename, 'rb') as input:
calculation_data = pickle.load(input)
print('Loaded data from: {}'.format(args.filename))
do_full = args.full
interpolate = args.interpolate
for slide_y in calculation_data['range_y']:
for slide_z in calculation_data['range_z']:
print(slide_y, slide_z)
if '{}_{}'.format(slide_y, slide_z) in calculation_data:
print(calculation_data['{}_{}'.format(slide_y, slide_z)])
data_i = calculation_data['{}_{}'.format(slide_y, slide_z)]
print(data_i['states_info'])
if do_full:
calculation_data.update({'{}_{}'.format(-slide_y, slide_z): data_i,
'{}_{}'.format(slide_y, -slide_z): data_i,
'{}_{}'.format(-slide_y, -slide_z): data_i})
if do_full:
y_range = np.unique((-np.array(calculation_data['range_y'])).tolist() + calculation_data['range_y'])
z_range = np.unique((-np.array(calculation_data['range_z'])).tolist() + calculation_data['range_z'])
else:
y_range = calculation_data['range_y']
z_range = calculation_data['range_z']
points = []
total_data = []
full_range = []
i = 0
for slide_y in y_range:
for slide_z in z_range:
print('{}_{}'.format(slide_y, slide_z))
if '{}_{}'.format(slide_y, slide_z) in calculation_data:
data = calculation_data['{}_{}'.format(slide_y, slide_z)]
total_data.append(data)
full_range.append(i)
points.append([slide_y, slide_z])
i += 1
states_orders = [get_order_states_list(data['states_info']) for data in total_data]
######################## W_DC ######################
data_1 = []
data_2 = []
data_3 = []
data_4 = []
for diab, coeff in [[data['diabatic_energies'], data['coefficients']] for data in total_data]:
factor = coeff['S_01'][0] * coeff['S_10'][0]
data_1.append(diab['V_DC'] * np.sign(factor))
factor = coeff['S_01'][1] * coeff['S_10'][1]
data_2.append(diab['V_DC'] * np.sign(factor))
factor = coeff['S_01'][2] * coeff['S_10'][2]
data_3.append(diab['V_DC'] * np.sign(factor))
factor = coeff['S_01'][3] * coeff['S_10'][3]
data_4.append(diab['V_DC'] * np.sign(factor))
data_1 = [data['diabatic_contributions']['W_DC'][0] for data in total_data]
data_2 = [data['diabatic_contributions']['W_DC'][1] for data in total_data]
data_3 = [data['diabatic_contributions']['W_DC'][2] for data in total_data]
data_4 = [data['diabatic_contributions']['W_DC'][3] for data in total_data]
data_1, data_2, data_3, data_4 = correct_order_list([data_1, data_2, data_3, data_4], states_orders)
if interpolate:
data_1 = interpolate_data(points, data_1, y_range, z_range)
data_2 = interpolate_data(points, data_2, y_range, z_range)
wdc1 = np.array(data_1)
wdc2 = np.array(data_2)
#v2_mayavi(data_1, data_2, '$W^{(1)}_{DC}$', '$W^{(2)}_{DC}$', y_range, z_range)
#exit()
with PdfPages(folder + 'W_DC.pdf') as pdf:
triplot(data_1, data_2, '$W^{(1)}_{DC}$', '$W^{(2)}_{DC}$', y_range, z_range, pdf=pdf, wireframe=True,
show_plot=args.show_plots, zlevels=np.arange(-0.4, 0.4 + 0.05, 0.05))
#show_plot = args.show_plots, zlevels = np.arange(-0.4, 0.4 + 0.05, 0.05))
biplot(data_1, data_2, '$W^{(1)}_{DC}$', '$W^{(2)}_{DC}$', y_range, z_range, show_plot=args.show_plots,
direction=0, pdf=pdf)
biplot(data_1, data_2, '$W^{(1)}_{DC}$', '$W^{(2)}_{DC}$', y_range, z_range, show_plot=args.show_plots,
direction=1, pdf=pdf)
######################## W_CT ######################
data_1 = [data['diabatic_contributions']['W_CT'][0] for data in total_data]
data_2 = [data['diabatic_contributions']['W_CT'][1] for data in total_data]
data_3 = [data['diabatic_contributions']['W_CT'][2] for data in total_data]
data_4 = [data['diabatic_contributions']['W_CT'][3] for data in total_data]
data_1, data_2, data_3, data_4 = correct_order_list([data_1, data_2, data_3, data_4], states_orders)
if interpolate:
data_1 = interpolate_data(points, data_1, y_range, z_range)
data_2 = interpolate_data(points, data_2, y_range, z_range)
wct1 = np.array(data_1)
wct2 = np.array(data_2)
with PdfPages(folder + 'W_CT.pdf') as pdf:
triplot(data_1, data_2, '$W^{(1)}_{CT}$', '$W^{(2)}_{CT}$', y_range, z_range, pdf=pdf, wireframe=True, show_plot=args.show_plots)
biplot(data_1, data_2, '$W^{(1)}_{CT}$', '$W^{(2)}_{CT}$', y_range, z_range, show_plot=args.show_plots, pdf=pdf, direction=0)
biplot(data_1, data_2, '$W^{(1)}_{CT}$', '$W^{(2)}_{CT}$', y_range, z_range, show_plot=args.show_plots, pdf=pdf, direction=1)
######################## W_e ######################
data_1 = [data['diabatic_contributions']['W_e'][0] for data in total_data]
data_2 = [data['diabatic_contributions']['W_e'][1] for data in total_data]
data_3 = [data['diabatic_contributions']['W_e'][2] for data in total_data]
data_4 = [data['diabatic_contributions']['W_e'][3] for data in total_data]
data_1, data_2, data_3, data_4 = correct_order_list([data_1, data_2, data_3, data_4], states_orders)
if interpolate:
data_1 = interpolate_data(points, data_1, y_range, z_range)
data_2 = interpolate_data(points, data_2, y_range, z_range)
we1 = np.array(data_1)
we2 = np.array(data_2)
with PdfPages(folder + 'W_e.pdf') as pdf:
triplot(data_1, data_2, 'W_e_1', 'W_e_2', y_range, z_range, pdf=pdf, wireframe=True, show_plot=args.show_plots)
biplot(data_1, data_2, 'W_e_1', 'W_e_2', y_range, z_range, show_plot=args.show_plots, pdf=pdf, direction=0)
biplot(data_1, data_2, 'W_e_1', 'W_e_2', y_range, z_range, show_plot=args.show_plots, pdf=pdf, direction=1)
####################### W_h ######################
data_1 = [data['diabatic_contributions']['W_h'][0] for data in total_data]
data_2 = [data['diabatic_contributions']['W_h'][1] for data in total_data]
data_3 = [data['diabatic_contributions']['W_h'][2] for data in total_data]
data_4 = [data['diabatic_contributions']['W_h'][3] for data in total_data]
data_1, data_2, data_3, data_4 = correct_order_list([data_1, data_2, data_3, data_4], states_orders)
if interpolate:
data_1 = interpolate_data(points, data_1, y_range, z_range)
data_2 = interpolate_data(points, data_2, y_range, z_range)
wh1 = np.array(data_1)
wh2 = np.array(data_2)
with PdfPages(folder + 'W_h.pdf') as pdf:
triplot(data_1, data_2, 'W_h_1', 'W_h_2', y_range, z_range, pdf=pdf, wireframe=True, show_plot=args.show_plots)
biplot(data_1, data_2, 'W_h_1', 'W_h_2', y_range, z_range, show_plot=args.show_plots, pdf=pdf, direction=0)
biplot(data_1, data_2, 'W_h_1', 'W_h_2', y_range, z_range, show_plot=args.show_plots, pdf=pdf, direction=1)
####################### diabatic_energies ######################
data_1 = [np.average(data['diabatic_energies']['E_LE']) for data in total_data]
data_2 = [np.average(data['diabatic_energies']['E_CT']) for data in total_data]
if interpolate:
data_1 = interpolate_data(points, data_1, y_range, z_range)
data_2 = interpolate_data(points, data_2, y_range, z_range)
e_le = np.array(data_1)
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import unittest
from federatedml.secureprotol.iterative_affine import IterativeAffineCipher
class TestAffine(unittest.TestCase):
def setUp(self):
self.randomized_key = IterativeAffineCipher.generate_keypair(randomized=True)
self.deterministic_key = IterativeAffineCipher.generate_keypair(randomized=False)
def tearDown(self):
unittest.TestCase.tearDown(self)
def test_add_randomized(self):
x_li = np.ones(100) * np.random.randint(100)
y_li = np.ones(100) * np.random.randint(1000)
z_li = np.ones(100) * np.random.rand()
t_li = range(100)
for i in range(x_li.shape[0]):
x = x_li[i]
y = y_li[i]
z = z_li[i]
t = t_li[i]
en_x = self.randomized_key.encrypt(x)
en_y = self.randomized_key.encrypt(y)
en_z = self.randomized_key.encrypt(z)
en_t = self.randomized_key.encrypt(t)
en_res = en_x + en_y + en_z + en_t
res = x + y + z + t
de_en_res = self.randomized_key.decrypt(en_res)
self.assertAlmostEqual(de_en_res, res)
    def test_add_deterministic(self):
x_li = np.ones(100) * np.random.randint(100)
y_li = np.ones(100) * np.random.randint(1000)
z_li = np.ones(100) * np.random.rand()
t_li = range(100)
for i in range(x_li.shape[0]):
x = x_li[i]
y = y_li[i]
z = z_li[i]
t = t_li[i]
en_x = self.deterministic_key.encrypt(x)
en_y = self.deterministic_key.encrypt(y)
en_z = self.deterministic_key.encrypt(z)
en_t = self.deterministic_key.encrypt(t)
en_res = en_x + en_y + en_z + en_t
res = x + y + z + t
de_en_res = self.deterministic_key.decrypt(en_res)
self.assertAlmostEqual(de_en_res, res)
def test_mul_randomized(self):
        x_li = np.ones(100) * np.random.randint(100)
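        # The remainder of this multiplication test is truncated in the source; the lines below
        # are a minimal sketch mirroring the addition tests above, and they assume the iterative
        # affine ciphertext supports multiplication by a plaintext scalar.
        y_li = np.ones(100) * np.random.rand()
        for i in range(x_li.shape[0]):
            x = x_li[i]
            y = y_li[i]
            en_x = self.randomized_key.encrypt(x)
            en_res = en_x * y
            res = x * y
            de_en_res = self.randomized_key.decrypt(en_res)
            self.assertAlmostEqual(de_en_res, res)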
"""Classification using random forest."""
import logging
import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier
logger = logging.getLogger(__name__)
class RandomForest:
"""Train or classify using a RandomForest model."""
    def __init__(self, num_features, model=None):
        """Create instance of RandomForest.
        Args:
            num_features (int): Number of features to train or classify.
            model (str or sklearn.ensemble.RandomForestClassifier, optional): Path to a pickled
                model, or an already trained model. Defaults to None.
        """
self.num_features = num_features
self.model = self.load_model(model)
    def load_model(self, model):
        """Load a trained sklearn.ensemble.RandomForestClassifier model.
        Args:
            model (str or sklearn.ensemble.RandomForestClassifier): Path to a pickled model, or an
                already trained model.
        Returns:
            sklearn.ensemble.RandomForestClassifier: Trained model, or None if it could not be loaded.
        """
if model is None:
return None
# Check if the model_input is a path or an sklearn random forest model
if isinstance(model, str):
try:
model = pickle.load(open(model, "rb"))
return self.validate_model(model)
except OSError:
logger.error("Could not load RandomForestModel")
return None
elif isinstance(model, RandomForestClassifier):
# Validate model based on parameters
return self.validate_model(model)
return None
def validate_model(self, model):
"""Validate a model with the current class instantiation.
Args:
model (sklearn.ensemble.RandomForestClassifier): A trained RandomForestClassifier
Returns:
[sklearn.ensemble.RandomForestClassifier]: A valid trained RandomForestClassifier
"""
if not isinstance(model, RandomForestClassifier):
logger.error(
"Can not validate model, is not of instance sklearn.ensemble.forest.RandomForestClassifier"
)
return None
if not model.n_features_ == self.num_features:
logger.error(
"Number of features is different from model parameter. Model has: %d, input was: %d",
model.n_features_,
self.num_features,
)
return None
return model
def train(self, X, y, num_trees=100, processors=-1):
"""Train/Fit a RandomForestClassifier using the observation matrix X and class vector y.
Args:
X (np.array): 2D Matrix of feature observations.
y (np.array): 1D vector of class labels.
            num_trees (int): Number of trees used in the forest.
processors (int): Number of parallel jobs used to train, -1 means all processors.
Returns:
sklearn.ensemble.RandomForestClassifier: A trained RandomForestClassifier model.
"""
# If a model is already defined, something is wrong. Does not support training multiple times in a row.
if self.model is not None:
logger.error(
"Surfclass does not support training an already existing model.."
)
return None
# validate X fits the parameters given in init
assert isinstance(X, np.ndarray), "X is not a valid numpy.ndarray"
assert (
X.ndim == 2
), "X does not have the correct shape, should be of form (n,f): observations 1D, and feature"
assert y.ndim == 1, "y does not have the correct shape, should be 1D vector"
assert (
X.shape[1] == self.num_features
), "Model and input does have the same number of features"
assert (
X.shape[0] == y.shape[0]
), "Number of class observations does not match number of feature observations."
rf = RandomForestClassifier(
n_estimators=num_trees, oob_score=False, verbose=0, n_jobs=processors
)
# fit the model
rf_trained = rf.fit(X, y)
# save the model to the instanced class (useful when one want to run classify immediately after)
self.model = rf_trained
# return the trained model
return rf_trained
def classify(self, X, prob=False, processors=None):
"""Classify X using the instantiated RandomForestClassifier model.
Args:
X (np.array): 2D Matrix of feature observations.
prob (bool): If true returns tuple with classified vector and highest class probability vector
            processors (int): Number of parallel jobs used for prediction. -1 means all processors, None means model default.
Returns:
np.array or tuple (np.array,np.array): classified vector or tuple of classified vector and probability vector
"""
assert (
self.model is not None
), "Could not find a model, please either train a model or initialise the class with a valid model path"
        # TODO: This might be double work, but the model attribute may have been changed since initialisation
model = self.validate_model(self.model)
if isinstance(processors, int):
model.n_jobs = processors
# Test the X input is acceptable for the given model.
assert (
X.ndim == 2
), "X does not have the correct shape, should be of form (n,f): observations 1D, and feature"
assert isinstance(X, np.ndarray), "X is not a valid numpy array"
assert (
X.shape[1] == self.num_features
), "Model and input does have the same number of features"
# run the classificaiton using X
classes = self.model.classes_
class_prediction_prob = model.predict_proba(X)
class_prediction = classes[np.argmax(class_prediction_prob, axis=1)]
# return tuple with class prediction and highest class probability if prob
if prob:
            return (class_prediction, np.amax(class_prediction_prob, axis=1))
        return class_prediction
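# A minimal usage sketch (not part of the original module): train on synthetic data and classify.
# The shapes and labels below are illustrative only; in practice X holds the feature observations
# described in the docstrings above.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_train = rng.rand(200, 4)             # 200 observations with 4 features each
    y_train = rng.randint(1, 4, size=200)  # three classes labelled 1..3
    clf = RandomForest(num_features=4)
    clf.train(X_train, y_train, num_trees=50)
    labels, probabilities = clf.classify(rng.rand(10, 4), prob=True)
    print(labels, probabilities)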
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
            if r.status_code == requests.codes.ok:
                # compute elapsed before printing so it is defined even when the
                # request completes on the first poll
                elapsed = (i * 3) / 60
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The matching NetCDF files are returned as a list of catalog-relative paths.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF data files in the catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
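# A minimal end-to-end sketch (not part of the original script, and never called automatically):
# request a short time window, list the resulting NetCDF files and read two variables.
# The reference designator, dates, tag and variable names below are illustrative only, and real
# requests need valid OOI API credentials in AUTH.
def example_m2m_workflow():
    request = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
                       '2019-01-01T00:00:00.000Z', '2019-01-02T00:00:00.000Z')
    if request is None:
        return None
    nc_files = M2M_Files(request, tag='.*METBK.*\\.nc$')
    example_vars = structtype()
    example_vars[0].name = 'time'
    example_vars[0].data = np.array([])
    example_vars[0].units = 'seconds since 1900-01-01'
    example_vars[1].name = 'sea_surface_temperature'
    example_vars[1].data = np.array([])
    example_vars[1].units = 'degC'
    loaded, timestamps = M2M_Data(nc_files, example_vars)
    return loaded, timestamps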
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
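# Seawater pCO2 streams: thermistor temperature (degC) and pCO2 of seawater (uatm).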
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
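# Seawater pH streams: thermistor temperature (degC) and pH (unitless).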
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
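# Downwelling spectral irradiance streams (uW cm-2 nm-1).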
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
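# Seafloor pressure streams: absolute seafloor pressure (dbar) and seawater temperature (degC).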
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
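# CTD streams: temperature, practical salinity, density, pressure, and conductivity.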
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
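# 3-D point velocity streams: eastward/northward/upward turbulent velocity and seawater pressure.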
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
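# Profiler-mounted (WFP) 3-D velocity streams: velocity components, heading/pitch/roll, and CTD pressure.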
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
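# Air-sea pCO2 streams: seawater and atmospheric pCO2 (uatm) and the derived CO2 flux.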
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
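# Photosynthetically available radiation streams: PAR (umol photons m-2 s-1) and CTD pressure.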
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
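# Absorption/attenuation spectrophotometer streams: only the time coordinate is requested for these datasets.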
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
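# Nitrate streams: raw and salinity-corrected nitrate concentration (umol/L).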
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
# Recovered-host streams (method == 'RecoveredHost') follow.
#MOPAK
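# Motion-pack (accelerometer) streams: only the time coordinate is requested for these datasets.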
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
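# Bulk meteorology streams: sea surface temperature/conductivity/salinity, winds, barometric pressure, humidity,
# longwave/shortwave radiation, precipitation, heat fluxes, surface currents, and specific humidity.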
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
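# Fluorometer (recovered-host streams): seawater scattering coefficient, chlorophyll-a,
# CDOM, total volume scattering, and optical backscatter.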
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
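# Direct-covariance flux package (buoy-mounted); only the time coordinate is populated for this stream.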
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
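# Dissolved oxygen optode: NSIF branches include estimated oxygen concentration and optode
# temperature; MFN branches return dissolved oxygen and the optode oxygen product only.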
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
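# Acoustic Doppler current profiler: bin depths, heading/pitch/roll, and
# eastward/northward/upward seawater velocities (earth coordinates).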
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
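# Surface wave spectra statistics from the buoy: wave heights, periods, mean direction, and spread.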
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
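# Single-point velocity meter: east/north/up velocities plus heading, pitch, roll, temperature,
# and pressure reported in scaled instrument units (deci-degrees, 0.01 degC, 0.001 dbar).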
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
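# Seawater pCO2 sensor: thermistor temperature and pCO2 of seawater (uatm).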
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
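# Seawater pH sensor: thermistor temperature and pH (unitless).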
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
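# Spectral irradiance: downwelling irradiance vector in uW cm-2 nm-1.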
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
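# Seafloor pressure recorder (MFN nodes only): absolute seafloor pressure and seawater temperature.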
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
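#CTDBP branches below map the standard CTD suite (temperature, practical salinity,
#density, pressure, conductivity) from the DCL-recovered host streams.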
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
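#VEL3D branches below map the three turbulent velocity components plus seawater
#pressure (0.001 dbar) from the DCL-recovered point-velocity streams on the MFN nodes.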
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
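#PCO2A branches below map surface-water and atmospheric partial pressure of CO2 (uatm)
#plus the derived air-sea CO2 flux from the buoy-mounted pCO2 instruments.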
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
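#OPTAA branches below only request the time coordinate; no absorption or attenuation
#channels are mapped into var_list here.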
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
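#NUTNR branches below map raw and salinity-corrected nitrate concentrations (umol/L)
#from the DCL-recovered SUNA streams.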
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
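#CTDBP -- RecoveredInst: same CTD suite as above, but read from the instrument-recovered
#(ctdbp_cdef_instrument_recovered) streams, which use the ctdbp_* parameter names.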
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
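#ADCP -- RecoveredInst: bin depths, attitude (heading/pitch/roll), and earth-referenced
#velocity components from the recovered adcp_velocity_earth streams.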
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
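#ZPLSC -- RecoveredInst: echogram streams; only the time coordinate is mapped here.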
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
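#VELPT -- RecoveredInst: point velocity components, attitude, temperature, and pressure
#from the recovered velpt_ab instrument streams on the BUOY and NSIF nodes.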
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
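#VEL3D -- RecoveredWFP/RecoveredInst: profiler (vel3d_k, with CTD pressure) and MFN
#seafloor (vel3d_c, turbulent velocities) point-velocity streams.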
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
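#Cabled benthic package (BEP) streamed data below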
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
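# --- CE05MOAS glider CTD datasets (ctdgv_m_glider_instrument). One branch per
# --- glider (GL386, GL384, GL383, GL382, GL381, GL327, GL326, GL320, GL319,
# --- GL312, GL311, GL247) and per delivery method (Telemetered /
# --- RecoveredHost); every branch uses the same eight-variable layout
# --- (time, temperature, salinity, density, pressure, conductivity, lat, lon).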
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
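# --- CE05MOAS glider dissolved-oxygen (DOSTA) datasets. Same per-glider /
# --- per-method pattern as the CTD branches above, with six variables
# --- (time, sci_oxy4_oxygen, sci_abs_oxygen, int_ctd_pressure, lat, lon).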
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
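# --- CE05MOAS glider fluorometer/backscatter (FLORT) datasets (flort_m_sample).
# --- Nine variables per branch: time, seawater scattering coefficient,
# --- chlorophyll, CDOM, volume backscatter, optical backscatter,
# --- int_ctd_pressure, lat, lon.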
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
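# --- PARAD (photosynthetically available radiation) streams for the CE05MOAS gliders ---
# Each block below carries PAR, interpolated CTD pressure, and glider position (lat/lon).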
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
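# --- ADCP (acoustic Doppler current profiler) streams for the CE05MOAS gliders (recovered_host only) ---
# Each block below carries bin depths, attitude (heading/pitch/roll), east/north/up seawater velocities,
# interpolated CTD pressure, and glider position (lat/lon).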
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
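# --- METBK hourly streams (metbk_hourly): bulk-derived meteorological flux products on the surface mooring buoys ---
# Variables include rain rate, buoyancy/heat/latent/sensible/momentum fluxes, net longwave and shortwave irradiance,
# 2 m specific humidity and air temperature, skin temperature, and 10 m wind speed.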
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
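# --- WAVSS_MeanDir: mean directional wave spectra from the WAVSSA000 surface wave
# sensor (band layout, mean-directional PSD, per-band mean direction and directional
# spread, plus magnetically corrected wave directions).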
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
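# --- WAVSS_NonDir: non-directional wave spectra (band layout and the
# non-directional PSD in m2 Hz-1).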
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
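# --- WAVSS_Motion: buoy motion time series (heave/north/east displacement offset
# arrays and magnetically corrected x/y buoy motion).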
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
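# --- WAVSS_Fourier: directional Fourier coefficients (non-directional and
# directional band layout plus the 2-D Fourier coefficient array).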
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
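# --- CE04OSPS / CE04OSPD profilers: CTD, DOSTA, and FLORT from the cabled shallow
# profiler (streamed) and the deep profiler (recovered_inst / recovered_wfp), plus
# streamed PHSEN, PARAD, SPKIR, NUTNR, PCO2W, and VELPT, and recovered VEL3D.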
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
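    #METBK Hourly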
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
        var_list[0].data = np.array([])
#!/usr/bin/env python
import numpy as np
import scipy.stats as stats
import itertools
import matplotlib
from matplotlib import cm
from matplotlib.ticker import FuncFormatter
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.linear_model
import sklearn.metrics
from volcanic.helpers import bround
from volcanic.tof import calc_tof, calc_es, calc_s_es
from volcanic.exceptions import MissingDataError
def get_reg_targets(idx1, idx2, d, tags, coeff, regress, mode="k"):
"""Separate regression targets and regressor variables."""
tag1 = tags[idx1]
tag2 = tags[idx2]
tags = tags[regress]
X1 = d[:, idx1].reshape(-1)
X2 = d[:, idx2].reshape(-1)
d1 = d[:, regress]
d2 = d[:, ~regress]
coeff = coeff[regress]
if mode == "t":
d1 = d1[:, ~coeff]
tags = tags[~coeff]
return X1, X2, tag1, tag2, tags, d1, d2, coeff
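# Minimal usage sketch (assumed shapes: `d` is an n_points x n_columns array of energies in
# kcal/mol, `tags` the column labels, `coeff` a boolean TS mask, `regress` the columns to fit):
#   X1, X2, tag1, tag2, tags_r, d_r, d_rest, coeff_r = get_reg_targets(
#       1, 2, d, tags, coeff, regress, mode="k"
#   )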
def plot_ci_manual(t, s_err, n, x, x2, y2, ax=None):
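    """Shade the pointwise confidence band of a least-squares line on *ax*.

    The half-width used is ci = t * s_err * sqrt(1/n + (x2 - mean(x))**2 / sum((x - mean(x))**2)),
    i.e. the standard confidence interval for the mean response of a simple linear fit.
    """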
if ax is None:
ax = plt.gca()
ci = (
t
* s_err
* np.sqrt(1 / n + (x2 - np.mean(x)) ** 2 / np.sum((x - np.mean(x)) ** 2))
)
ax.fill_between(x2, y2 + ci, y2 - ci, color="#b9cfe7", alpha=0.6)
return ax
def plot_3d_lsfer(
idx1,
idx2,
d,
tags,
coeff,
regress,
cb="white",
ms="o",
lmargin=5,
rmargin=5,
npoints=100,
plotmode=1,
verb=0,
):
x1base = 20
x2base = 20
X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
idx1, idx2, d, tags, coeff, regress, mode="k"
)
d_refill = np.zeros_like(d)
d_refill[~np.isnan(d)] = d[~np.isnan(d)]
lnsteps = range(d.shape[1])
mape = 100
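    # Fit each regression target column of d (skipping the first and last columns)
    # as a linear function of the two descriptors X1 and X2.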
for j in lnsteps[1:-1]:
if verb > 0:
print(f"Plotting regression of {tags[j]}.")
XY = np.vstack([X1, X2, d[:, j]]).T
if isinstance(cb, np.ndarray):
cbi = np.array(cb)[~np.isnan(XY).any(axis=1)]
else:
cbi = cb
if isinstance(ms, np.ndarray):
msi = np.array(ms)[~np.isnan(XY).any(axis=1)]
else:
msi = ms
XYm = XY[np.isnan(XY).any(axis=1)]
XY = XY[~np.isnan(XY).any(axis=1)]
Xm = XYm[:, :2]
Ym = XYm[:, 2]
X = XY[:, :2]
Y = XY[:, 2]
xmax = bround(Y.max() + rmargin, x1base)
xmin = bround(Y.min() - lmargin, x1base)
xint = np.sort(Y)
reg = sk.linear_model.LinearRegression().fit(X, Y)
if verb > 2:
print(
f"Linear model has coefficients : {reg.coef_} \n and intercept {reg.intercept_}"
)
Y_pred = reg.predict(X)
p = reg.coef_
currmape = sk.metrics.mean_absolute_percentage_error(Y, Y_pred)
for k, y in enumerate(Ym):
if not np.isnan(Xm[k, 0]) and not np.isnan(Xm[k, 1]) and np.isnan(Ym[k]):
                Ym[k] = reg.predict(Xm[k].reshape(1, -1))
d_refill[np.isnan(d).any(axis=1)][:, j][k] = Ym[k]
elif not np.isnan(Ym[k]) and not np.isnan(Xm[k, 0]):
if currmape < mape:
Xm[k, 1] = (
                        Ym[k] - reg.intercept_ - reg.coef_[0] * Xm[k, 0]
                    ) / reg.coef_[1]
d_refill[np.isnan(d).any(axis=1)][:, idx2][k] = Xm[k, 1]
mape = currmape
elif not np.isnan(Ym[k]) and not np.isnan(Xm[k, 1]):
if currmape < mape:
Xm[k, 0] = (
                        Ym[k] - reg.intercept_ - reg.coef_[1] * Xm[k, 1]
                    ) / reg.coef_[0]
d_refill[np.isnan(d).any(axis=1)][:, idx1][k] = Xm[k, 0]
mape = currmape
else:
raise MissingDataError(
"Both descriptor and regression target are undefined. This should have been fixed before this point. Exiting."
)
n = Y.size
m = p.size
dof = n - m
t = stats.t.ppf(0.95, dof)
resid = Y - Y_pred
chi2 = np.sum((resid / Y_pred) ** 2)
s_err = np.sqrt(np.sum(resid ** 2) / dof)
fig, ax = plt.subplots(
frameon=False, figsize=[3, 3], dpi=300, constrained_layout=True
)
yint = np.sort(Y_pred)
plot_ci_manual(t, s_err, n, X, xint, yint, ax=ax)
pi = (
t
* s_err
* np.sqrt(
1 + 1 / n + (xint - np.mean(X)) ** 2 / np.sum((X - np.mean(X)) ** 2)
)
)
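        # pi is the half-width of the 95% prediction interval; it is computed here but not drawn.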
ax.plot(xint, yint, "-", linewidth=1, color="#000a75", alpha=0.85)
for i in range(len(X)):
ax.scatter(
Y_pred[i],
Y[i],
s=12.5,
c=cbi[i],
marker=msi[i],
linewidths=0.15,
edgecolors="black",
)
# Border
ax.spines["top"].set_color("black")
ax.spines["bottom"].set_color("black")
ax.spines["left"].set_color("black")
ax.spines["right"].set_color("black")
ax.get_xaxis().set_tick_params(direction="out")
ax.get_yaxis().set_tick_params(direction="out")
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
# Labels and key
plt.xlabel(f"Function of {tag1} and {tag2}")
plt.ylabel(f"{tags[j]} [kcal/mol]")
plt.xlim(xmin, xmax)
plt.savefig(f"{tags[j]}.png")
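    # Return the regression targets with missing entries (partially) refilled, re-joined
    # with the columns that were excluded from the regression.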
return np.hstack((d_refill, d2))
def plot_3d_t_volcano(
idx1,
idx2,
d,
tags,
coeff,
regress,
dgr,
cb="white",
ms="o",
lmargin=15,
rmargin=15,
npoints=200,
plotmode=1,
verb=0,
):
x1base = 25
x2base = 20
X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
idx1, idx2, d, tags, coeff, regress, mode="t"
)
lnsteps = range(d.shape[1])
x1max = bround(X1.max() + rmargin, x1base)
x1min = bround(X1.min() - lmargin, x1base)
x2max = bround(X2.max() + rmargin, x2base)
x2min = bround(X2.min() - lmargin, x2base)
if verb > 1:
print(
f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
)
xint = np.linspace(x1min, x1max, npoints)
yint = np.linspace(x2min, x2max, npoints)
grids = []
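    # Build one interpolated surface per column of d: each value is fit as a linear
    # function of the two descriptors and evaluated on the npoints x npoints grid.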
for i, j in enumerate(lnsteps):
XY = np.vstack([X1, X2, d[:, j]]).T
X = XY[:, :2]
Y = XY[:, 2]
reg = sk.linear_model.LinearRegression().fit(X, Y)
Y_pred = reg.predict(X)
gridj = np.zeros((npoints, npoints))
for k, x1 in enumerate(xint):
for l, x2 in enumerate(yint):
x1x2 = np.vstack([x1, x2]).reshape(1, -1)
gridj[k, l] = reg.predict(x1x2)
grids.append(gridj)
grid = np.zeros_like(gridj)
ridmax = np.zeros_like(gridj, dtype=int)
ridmin = np.zeros_like(gridj, dtype=int)
rb = np.zeros_like(gridj, dtype=int)
for k, x1 in enumerate(xint):
for l, x2 in enumerate(yint):
profile = [gridj[k, l] for gridj in grids][:-1]
dgr = [gridj[k, l] for gridj in grids][-1]
grid[k, l], ridmax[k, l], ridmin[k, l], diff = calc_s_es(
profile, dgr, esp=True
)
rid = np.hstack([ridmin, ridmax])
if verb > 0:
pass
ymin = grid.min()
ymax = grid.max()
px = np.zeros_like(d[:, 0])
py = np.zeros_like(d[:, 0])
for i in range(d.shape[0]):
profile = d[i, :-1]
dgr = d[i][-1]
px[i] = X1[i]
py[i] = X2[i]
x1label = f"{tag1} [kcal/mol]"
x2label = f"{tag2} [kcal/mol]"
ylabel = "-ΔG(pds) [kcal/mol]"
filename = f"t_volcano_{tag1}_{tag2}.png"
if verb > 0:
csvname = f"t_volcano_{tag1}_{tag2}.csv"
print(f"Saving volcano data to file {csvname}")
x = np.zeros_like(grid.reshape(-1))
y = np.zeros_like(grid.reshape(-1))
for i, xy in enumerate(itertools.product(xint, yint)):
x[i] = xy[0]
y[i] = xy[1]
zdata = list(zip(x, y, grid.reshape(-1)))
np.savetxt(
csvname,
zdata,
fmt="%.4e",
delimiter=",",
            header="Descriptor 1, Descriptor 2, -ΔG(pds)",
)
if plotmode == 2:
plot_3d_contour(
xint,
yint,
grid.T,
px,
py,
ymin,
ymax,
x1min,
x1max,
x2min,
x2max,
x1base,
x2base,
x1label=x1label,
x2label=x2label,
ylabel=ylabel,
filename=filename,
cb=cb,
ms=ms,
plotmode=plotmode,
)
else:
plot_3d_scatter(
xint,
yint,
grid.T,
px,
py,
ymin,
ymax,
x1min,
x1max,
x2min,
x2max,
x1base,
x2base,
x1label=x1label,
x2label=x2label,
ylabel=ylabel,
filename=filename,
cb=cb,
ms=ms,
plotmode=plotmode,
)
return xint, yint, grid, px, py
def plot_3d_k_volcano(
idx1,
idx2,
d,
tags,
coeff,
regress,
dgr,
cb="white",
ms="o",
lmargin=15,
rmargin=15,
npoints=200,
plotmode=1,
verb=0,
):
x1base = 25
x2base = 20
X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
idx1, idx2, d, tags, coeff, regress, mode="k"
)
lnsteps = range(d.shape[1])
x1max = bround(X1.max() + rmargin, x1base)
x1min = bround(X1.min() - lmargin, x1base)
x2max = bround(X2.max() + rmargin, x2base)
x2min = bround(X2.min() - lmargin, x2base)
if verb > 1:
print(
f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
)
xint = np.linspace(x1min, x1max, npoints)
yint = np.linspace(x2min, x2max, npoints)
grids = []
for i, j in enumerate(lnsteps):
XY = np.vstack([X1, X2, d[:, j]]).T
X = XY[:, :2]
Y = XY[:, 2]
reg = sk.linear_model.LinearRegression().fit(X, Y)
Y_pred = reg.predict(X)
gridj = np.zeros((npoints, npoints))
for k, x1 in enumerate(xint):
for l, x2 in enumerate(yint):
x1x2 = np.vstack([x1, x2]).reshape(1, -1)
gridj[k, l] = reg.predict(x1x2)
grids.append(gridj)
grid = np.zeros_like(gridj)
ridmax = np.zeros_like(gridj, dtype=int)
ridmin = np.zeros_like(gridj, dtype=int)
rb = np.zeros_like(gridj, dtype=int)
for k, x1 in enumerate(xint):
for l, x2 in enumerate(yint):
profile = [gridj[k, l] for gridj in grids][:-1]
dgr = [gridj[k, l] for gridj in grids][-1]
grid[k, l], ridmax[k, l], ridmin[k, l], diff = calc_s_es(
profile, dgr, esp=True
)
rid = np.hstack([ridmin, ridmax])
if verb > 0:
pass
ymin = grid.min()
ymax = grid.max()
px = np.zeros_like(d[:, 0])
py = np.zeros_like(d[:, 0])
for i in range(d.shape[0]):
profile = d[i, :-1]
px[i] = X1[i]
py[i] = X2[i]
x1label = f"{tag1} [kcal/mol]"
x2label = f"{tag2} [kcal/mol]"
ylabel = "-ΔG(kds) [kcal/mol]"
filename = f"k_volcano_{tag1}_{tag2}.png"
if verb > 0:
csvname = f"k_volcano_{tag1}_{tag2}.csv"
print(f"Saving volcano data to file {csvname}")
x = np.zeros_like(grid.reshape(-1))
y = np.zeros_like(grid.reshape(-1))
for i, xy in enumerate(itertools.product(xint, yint)):
x[i] = xy[0]
y[i] = xy[1]
zdata = list(zip(x, y, grid.reshape(-1)))
np.savetxt(
csvname,
zdata,
fmt="%.4e",
delimiter=",",
            header="Descriptor 1, Descriptor 2, -ΔG(kds)",
)
if plotmode == 2:
plot_3d_contour(
xint,
yint,
grid.T,
px,
py,
ymin,
ymax,
x1min,
x1max,
x2min,
x2max,
x1base,
x2base,
x1label=x1label,
x2label=x2label,
ylabel=ylabel,
filename=filename,
cb=cb,
ms=ms,
plotmode=plotmode,
)
else:
plot_3d_scatter(
xint,
yint,
grid.T,
px,
py,
ymin,
ymax,
x1min,
x1max,
x2min,
x2max,
x1base,
x2base,
x1label=x1label,
x2label=x2label,
ylabel=ylabel,
filename=filename,
cb=cb,
ms=ms,
plotmode=plotmode,
)
return xint, yint, grid, px, py
def plot_3d_es_volcano(
idx1,
idx2,
d,
tags,
coeff,
regress,
dgr,
cb="white",
ms="o",
lmargin=15,
rmargin=15,
npoints=200,
plotmode=1,
verb=0,
):
x1base = 25
x2base = 20
X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
idx1, idx2, d, tags, coeff, regress, mode="k"
)
lnsteps = range(d.shape[1])
x1max = bround(X1.max() + rmargin, x1base)
x1min = bround(X1.min() - lmargin, x1base)
x2max = bround(X2.max() + rmargin, x2base)
x2min = bround(X2.min() - lmargin, x2base)
if verb > 1:
print(
f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
)
xint = np.linspace(x1min, x1max, npoints)
yint = np.linspace(x2min, x2max, npoints)
grids = []
for i, j in enumerate(lnsteps):
XY = np.vstack([X1, X2, d[:, j]]).T
X = XY[:, :2]
Y = XY[:, 2]
reg = sk.linear_model.LinearRegression().fit(X, Y)
Y_pred = reg.predict(X)
gridj = np.zeros((npoints, npoints))
for k, x1 in enumerate(xint):
for l, x2 in enumerate(yint):
x1x2 = np.vstack([x1, x2]).reshape(1, -1)
gridj[k, l] = reg.predict(x1x2)
grids.append(gridj)
grid = np.zeros_like(gridj)
ridmax = np.zeros_like(gridj, dtype=int)
ridmin = np.zeros_like(gridj, dtype=int)
rb = np.zeros_like(gridj, dtype=int)
for k, x1 in enumerate(xint):
for l, x2 in enumerate(yint):
profile = [gridj[k, l] for gridj in grids][:-1]
dgr = [gridj[k, l] for gridj in grids][-1]
grid[k, l], ridmax[k, l], ridmin[k, l], diff = calc_es(
profile, dgr, esp=True
)
rid = np.hstack([ridmin, ridmax])
if verb > 0:
pass
ymin = grid.min()
ymax = grid.max()
px = np.zeros_like(d[:, 0])
py = np.zeros_like(d[:, 0])
for i in range(d.shape[0]):
profile = d[i, :-1]
px[i] = X1[i]
py[i] = X2[i]
x1label = f"{tag1} [kcal/mol]"
x2label = f"{tag2} [kcal/mol]"
ylabel = r"-δ$E$ [kcal/mol]"
filename = f"es_volcano_{tag1}_{tag2}.png"
if verb > 0:
csvname = f"es_volcano_{tag1}_{tag2}.csv"
print(f"Saving volcano data to file {csvname}")
x = np.zeros_like(grid.reshape(-1))
y = np.zeros_like(grid.reshape(-1))
for i, xy in enumerate(itertools.product(xint, yint)):
x[i] = xy[0]
y[i] = xy[1]
zdata = list(zip(x, y, grid.reshape(-1)))
np.savetxt(
csvname,
zdata,
fmt="%.4e",
delimiter=",",
        header="Descriptor 1, Descriptor 2, -d_Ges",
)
if plotmode == 2:
plot_3d_contour(
xint,
yint,
grid.T,
px,
py,
ymin,
ymax,
x1min,
x1max,
x2min,
x2max,
x1base,
x2base,
x1label=x1label,
x2label=x2label,
ylabel=ylabel,
filename=filename,
cb=cb,
ms=ms,
plotmode=plotmode,
)
else:
plot_3d_scatter(
xint,
yint,
grid.T,
px,
py,
ymin,
ymax,
x1min,
x1max,
x2min,
x2max,
x1base,
x2base,
x1label=x1label,
x2label=x2label,
ylabel=ylabel,
filename=filename,
cb=cb,
ms=ms,
plotmode=plotmode,
)
return xint, yint, grid, px, py
def plot_3d_tof_volcano(
idx1,
idx2,
d,
tags,
coeff,
regress,
dgr,
T=298.15,
cb="white",
ms="o",
lmargin=15,
rmargin=15,
npoints=200,
plotmode=1,
verb=0,
):
x1base = 25
x2base = 20
X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
idx1, idx2, d, tags, coeff, regress, mode="k"
)
lnsteps = range(d.shape[1])
x1max = bround(X1.max() + rmargin, x1base)
x1min = bround(X1.min() - lmargin, x1base)
x2max = bround(X2.max() + rmargin, x2base)
x2min = bround(X2.min() - lmargin, x2base)
if verb > 1:
print(
f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
)
xint = np.linspace(x1min, x1max, npoints)
yint = np.linspace(x2min, x2max, npoints)
grids = []
for i, j in enumerate(lnsteps):
XY = np.vstack([X1, X2, d[:, j]]).T
X = XY[:, :2]
Y = XY[:, 2]
reg = sk.linear_model.LinearRegression().fit(X, Y)
Y_pred = reg.predict(X)
gridj = np.zeros((npoints, npoints))
for k, x1 in enumerate(xint):
for l, x2 in enumerate(yint):
x1x2 = np.vstack([x1, x2]).reshape(1, -1)
gridj[k, l] = reg.predict(x1x2)
grids.append(gridj)
    grid = np.zeros_like(gridj)
#!/usr/bin/env python
"""Convert dm3 files to other image formats.
"""
import os
import sys
import argparse
import numpy as np
# from PIL import Image
import DM3lib as dm3
from glob import glob
from wmem import parse, utils
def main(argv):
"""Convert dm3 files to other image formats."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser = parse.parse_convert_dm3(parser)
parser = parse.parse_common(parser)
args = parser.parse_args()
convert_dm3(
args.inputdir,
args.outputdir,
args.save_steps,
args.protective,
)
def convert_dm3(
    filepaths,
    outdir='',
    save_steps=False,
    protective=False,
    dumptags=False,     # assumption: default added; `dumptags` is used below
    outexts=('.tif',),  # assumption: default added; `outexts` is used below
    ):
"""Convert dm3 files to other image formats."""
# check output paths
outpaths = {'out': outdir}
status = utils.output_check(outpaths, save_steps, protective)
if status == "CANCELLED":
return
if not os.path.exists(outdir):
        os.makedirs(outdir)
for filepath in filepaths:
filename = os.path.split(filepath)[1]
fileref = os.path.splitext(filename)[0]
        im, dm3f = read_dm3_as_ndimage(filepath)
if dumptags:
dm3f.dumpTags(outdir)
if '.tif' in outexts:
outfilepath = os.path.join(outdir, fileref + '.tif')
im.save(outfilepath)
def read_dm3_as_ndimage(filepath):
"""Read a .dm3 file."""
filename = os.path.split(filepath)[1]
fileref = os.path.splitext(filename)[0]
dm3f = dm3.DM3(filepath, debug=0)
im = dm3f.imagedata
return im, dm3f
def convert_to_8bit(aa, cuts=(0, 0)):  # `aa`/`cuts` were used below but undeclared
    """Normalize an image (optionally clipped to `cuts`) for 8-bit conversion."""
    # - normalize image for conversion to 8-bit
    aa_norm = aa.copy()
    # -- apply cuts (optional)
    if cuts[0] != cuts[1]:
        aa_norm[ (aa <= min(cuts)) ] = float(min(cuts))
        aa_norm[ (aa >= max(cuts)) ] = float(max(cuts))
    # -- normalize
    aa_norm = (aa_norm - np.min(aa_norm)) / (np.max(aa_norm) - np.min(aa_norm))
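# Illustrative follow-up (assumption: the original routine goes on to rescale
# the normalized array); a minimal self-contained 8-bit conversion sketch.
def _demo_to_uint8(aa):
    aa = np.asarray(aa, dtype=float)
    aa_norm = (aa - np.min(aa)) / (np.max(aa) - np.min(aa))
    return (255.0 * aa_norm).astype(np.uint8)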
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from numpy.linalg import norm
from sklearn.utils import check_array
from sklearn.exceptions import ConvergenceWarning
from .lasso_fast import celer
from .cython_utils import compute_norms_X_col, compute_Xw
from .multitask_fast import celer_mtl
from .PN_logreg import newton_celer
LASSO = 0
LOGREG = 1
def celer_path(X, y, pb, solver="celer", eps=1e-3, n_alphas=100, alphas=None,
coef_init=None, max_iter=20, gap_freq=10, max_epochs=50000,
p0=10, verbose=0, verbose_inner=0, tol=1e-6, prune=0,
return_thetas=False, X_offset=None, X_scale=None,
return_n_iter=False, positive=False):
"""Compute Lasso path with Celer as inner solver.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data or column
sparse format (CSC) to avoid unnecessary memory duplication.
y : ndarray, shape (n_samples,)
Target values
pb : "lasso" | "logreg"
Optimization problem to solve.
solver : "celer" | "PN"
Algorithm to use if pb == "logreg".
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min = 1e-3 * alpha_max``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
    coef_init : ndarray, shape (n_features,) | None, optional (default=None)
Initial value of coefficients. If None, np.zeros(n_features) is used.
max_iter : int, optional
The maximum number of iterations (subproblem definitions)
gap_freq : int, optional
Number of coordinate descent epochs between each duality gap
computations.
max_epochs : int, optional
Maximum number of CD epochs on each subproblem.
p0 : int, optional
First working set size.
verbose : bool or integer, optional
Amount of verbosity.
verbose_inner : bool or integer
Amount of verbosity in the inner solver.
tol : float, optional
The tolerance for the optimization: the solver runs until the duality
gap is smaller than ``tol`` or the maximum number of iteration is
reached.
prune : 0 | 1, optional
Whether or not to use pruning when growing working sets.
return_thetas : bool, optional
If True, dual variables along the path are returned.
X_offset : np.array, shape (n_features,), optional
Used to center sparse X without breaking sparsity. Mean of each column.
See sklearn.linear_model.base._preprocess_data().
X_scale : np.array, shape (n_features,), optional
Used to scale centered sparse X without breaking sparsity. Norm of each
centered column. See sklearn.linear_model.base._preprocess_data().
return_n_iter : bool, optional
If True, number of iterations along the path are returned.
positive : bool, optional (default=False)
When set to True, if pb == "lasso", forces the coefficients to be
positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
Duality gaps returned by the solver along the path.
thetas : array, shape (n_alphas, n_samples)
The dual variables along the path.
(Is returned only when ``return_thetas`` is set to True).
"""
assert pb in ("lasso", "logreg")
if pb == "lasso":
solver = "celer"
pb = LASSO
else:
pb = LOGREG
assert solver in ("celer", "PN")
    if pb == LOGREG and set(y) - set([-1.0, 1.0]):
raise ValueError(
"y must contain only -1. or 1 values. Got %s " % (set(y)))
is_sparse = sparse.issparse(X)
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=False)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
if X_offset is not None:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = X_offset / X_scale
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
if alphas is None:
if positive:
alpha_max = np.max(X.T.dot(y)) / n_samples
elif pb == "logreg":
alpha_max = norm(X.T @ y, ord=np.inf) / 2.
else:
alpha_max = norm(X.T @ y, ord=np.inf) / n_samples
alphas = alpha_max * np.geomspace(1, eps, n_alphas,
dtype=X.dtype)
else:
alphas = np.sort(alphas)[::-1]
n_alphas = len(alphas)
coefs = np.zeros((n_features, n_alphas), order='F', dtype=X.dtype)
thetas = np.zeros((n_alphas, n_samples), dtype=X.dtype)
dual_gaps = np.zeros(n_alphas)
if return_n_iter:
n_iters = np.zeros(n_alphas, dtype=int)
if is_sparse:
X_dense = np.empty([1, 1], order='F', dtype=X.data.dtype)
X_data = X.data
X_indptr = X.indptr
X_indices = X.indices
else:
X_dense = X
        X_data = np.empty([1], dtype=X.dtype)
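        # Illustrative call of celer_path (sketch; shapes and values invented):
        #   X = np.asfortranarray(np.random.randn(40, 80))
        #   y = X @ np.random.randn(80)
        #   alphas, coefs, gaps = celer_path(X, y, pb="lasso", n_alphas=10,
        #                                    eps=1e-2)
        #   # coefs has shape (80, 10); gaps[i] is the duality gap at alphas[i]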
__author__ = "<NAME>"
__version__ = "0.1"
import os, sys
from dataclasses import dataclass
from typing import List
import functools
import operator
import numpy as np
from astropy import units as u
from astropy import constants as const
from astropy.table import QTable, Column
from colorama import Fore
from colorama import init
init(autoreset=True)
def average_(x, n):
"""
Bin an array by averaging n cells together
Input:
x: astropy column
n: number of cells to average
Return:
average each n cells
"""
return np.average(x.reshape((-1, n)), axis=1)
def average_err(x, n):
"""
For binning an array by averaging n cells together, propagation of errors by
sqrt(e1**2+e2**2+e3**2.+...+en**2.)/n
Input:
x: astropy column, for error
n: number of cells to average
Return:
geometric mean of errors
"""
return np.sqrt(np.average((x ** 2).reshape((-1, n)), axis=1) / n)
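# Quick sketch of the two binning helpers above (invented values; they work on
# any ndarray whose length is a multiple of n):
#   average_(np.arange(8.0), 4)     ->  array([1.5, 5.5])
#   average_err(np.arange(8.0), 4)  ->  per-bin sqrt(e1**2 + ... + en**2) / n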
def sigma_to_fwhm(sigma):
"""
Convert from Gaussian sigma to FWHM
"""
return sigma * 2.0 * np.sqrt(2.0 * np.log(2.0))
def fwhm_to_sigma(fwhm):
"""
Convert FWHM of 1D Gaussian to sigma
"""
    return fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
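# Sanity check for the conversions above (FWHM = 2*sqrt(2*ln 2)*sigma):
#   sigma_to_fwhm(1.0)    ->  about 2.3548
#   fwhm_to_sigma(2.3548) ->  about 1.0   (the two functions are inverses)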
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os, subprocess
import numpy as np
import GenericUsefulScripts as GUS
from astropy import units as u
from astropy.io import ascii, fits
from astropy.convolution import convolve
from astropy.stats import SigmaClip
from astropy.coordinates import SkyCoord
from photutils.background import MedianBackground, Background2D
from skimage.transform import resize
import multiprocessing
import ChrisFuncs
import pandas as pd
space = ' '
def data_reduction(galaxy_name, path_fits_input = 'standard'):
# ---------------------------------------------------------------------------
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
subtable = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
ra, dec = subtable['ra'].values[0], subtable['dec'].values[0]
ap_cen_coord = SkyCoord(ra*u.deg, dec*u.deg, frame = 'fk5')
semimaj = subtable['semimaj_arcsec'].values[0]
axial_ratio, pos_angle = subtable['axial_ratio'].values[0], subtable['pos_angle'].values[0]
# ---------------------------------------------------------------------------
subprocess.call('mkdir ../'+galaxy_name+'/_ReducedMaps/', shell = True)
list_data = []
if path_fits_input == 'standard': path_fits_input = '../'+galaxy_name+'/Caapr/Temp/Processed_Maps'
else: path_fits_input = '../'+galaxy_name+'/'+path_fits_input
header_fits = '../'+galaxy_name+'/Caapr/Maps/'
print('Reading original maps...')
filelist = [x for x in os.listdir('Caapr/Maps') if x.endswith('.fits')]
for file in filelist:
if file.endswith('Thumbnail.fits'): continue # Don't work with thumbnails
elif file.endswith('Error.fits'): continue # Don't work with Errors
signal_path = path_fits_input+'/'+file
list_data.append(GUS.FitsUtils(signal_path))
print(space+signal_path+' read')
print('...done!')
print()
for data in list_data:
if os.path.exists('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits'):
print(data.bandname+'.fits already reduced, skipping to next band')
continue
else: print('Processing band', data.bandname)
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
centre_x, centre_y = ap_cen_coord.to_pixel(data.wcs)
pixel_scale = (data.get_pixel_scale()*u.deg).to('arcsec').value
Gal_Ap_Stuff = centre_x, centre_y, semimaj/pixel_scale, axial_ratio, pos_angle
# Reduce band
signal_reduced = reduce(data, Gal_Ap_Stuff)
# Save fits
hdu = fits.PrimaryHDU(signal_reduced)
hdu.header = data.hdr
hdu.writeto('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits')
print()
print('Data reduction phase over.')
print()
return
def data_reduction_parallel(galaxy_name, processes = 5, path_fits_input = 'standard'):
from itertools import repeat
# ---------------------------------------------------------------------------
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
subtable = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
ra, dec = subtable['ra'].values[0], subtable['dec'].values[0]
ap_cen_coord = SkyCoord(ra*u.deg, dec*u.deg, frame = 'fk5')
semimaj = subtable['semimaj_arcsec'].values[0]
axial_ratio, pos_angle = subtable['axial_ratio'].values[0], subtable['pos_angle'].values[0]
# ---------------------------------------------------------------------------
subprocess.call('mkdir ../'+galaxy_name+'/_ReducedMaps/', shell = True)
list_data = []
if path_fits_input == 'standard': path_fits_input = '../'+galaxy_name+'/Caapr/Temp/Processed_Maps'
else: path_fits_input = '../'+galaxy_name+'/'+path_fits_input
header_fits = '../'+galaxy_name+'/Caapr/Maps/'
print('Reading original maps...')
filelist = [x for x in os.listdir('Caapr/Maps') if x.endswith('.fits')]
for file in filelist:
if file.endswith('Thumbnail.fits'): continue # Don't work with thumbnails
elif file.endswith('Error.fits'): continue # Don't work with Errors
signal_path = path_fits_input+'/'+file
list_data.append(GUS.FitsUtils(signal_path))
print(space+signal_path+' read')
print('...done!')
print()
pool = multiprocessing.Pool()
with multiprocessing.Pool(processes=processes) as pool:
func = zip(list_data, repeat(galaxy_name), \
repeat(ap_cen_coord), repeat(semimaj), repeat(axial_ratio), repeat(pos_angle))
pool.starmap(reduction_loop_parallel, func)
print()
print('Data reduction phase over.')
print()
return
def reduction_loop_parallel(data, galaxy_name, ap_cen_coord, semimaj, axial_ratio, pos_angle):
if os.path.exists('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits'):
print(data.bandname+'.fits already reduced, skipping to next band')
return
else: print('Processing band', data.bandname)
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
centre_x, centre_y = ap_cen_coord.to_pixel(data.wcs)
pixel_scale = (data.get_pixel_scale()*u.deg).to('arcsec').value
Gal_Ap_Stuff = centre_x, centre_y, semimaj/pixel_scale, axial_ratio, pos_angle
# Reduce band
signal_reduced = reduce(data, Gal_Ap_Stuff)
# Save fits
hdu = fits.PrimaryHDU(signal_reduced)
hdu.header = data.hdr
hdu.writeto('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits')
return
def reduce(data, Gal_Ap_Stuff, psf_degrad = True, sky_sub = True):
#if data.bandname[:7] == 'Spitzer':
# print
# print(space+"Spitzer bands usually have a problem with sky subtraction")
# print(space+"Evaluated background average is "+str(bkg_average)+". Perhaps it's too low.")
# print(space+"Do you want to insert the bkg average by hand? (insert value or n)")
# answer = raw_input()
# if answer == 'n': pass
# else: bkg_average = float(answer)
#else: pass
ok_nan = np.where(np.nan_to_num(data.signal_with_nans-1) == 0) # I know, can't do anything 'bout it
if sky_sub:
# Sky subtraction
print(space+'Sky subtraction for '+data.bandname+' ...')
# 1) Flatten the background
signal_flat, check_sub = sky_flattening(data, Gal_Ap_Stuff)
# 2) If check_sub is sub, the sky has already been flattened + removed
# if not, remove the average background
if check_sub == 'sub':
signal_skysub = signal_flat.copy()
elif check_sub == 'unsub':
bkg_average = evaluate_bkg_avg(signal_flat, Gal_Ap_Stuff)
if bkg_average < 0:
print(space+"Evaluated background average is lower than 0. Returning original map.")
signal_skysub = signal_flat.copy()
else:
print(space+"Evaluated background average is {0:.2E}".format(bkg_average))
signal_skysub = signal_flat - bkg_average
else:
print(space+'No sky flattening + subtraction requested. Hey, whatever you want.')
signal_skysub = data.signal.copy()
if psf_degrad:
print(space+'PSF degradation for '+data.bandname+' ...')
if data.bandname == 'SPIRE_350':
return signal_skysub
else:
try:
kernel_path = '../_kernels/Kernel_LoRes_'+data.bandname+'_to_SPIRE_350.fits'
kernel = fits.getdata(kernel_path)
kernel_resized = resize(kernel, (101, 101), preserve_range = True)
signal_conv = convolve(signal_skysub, kernel = kernel_resized, boundary = None, preserve_nan = True)
signal_conv[ok_nan] = np.nan
except:
print(space+'No LowResolution kernel, switching to (slower) HighResolution.')
kernel_path = '../_kernels/Kernel_HiRes_'+data.bandname+'_to_SPIRE_350.fits'
kernel = fits.getdata(kernel_path)
kernel_resized = resize(kernel, (101, 101), preserve_range = True)
signal_conv = convolve(signal_skysub, kernel = kernel_resized, boundary = None, preserve_nan = True)
signal_conv[ok_nan] = np.nan
return signal_conv
else:
print(space+'No PSF degradation requested. I beg you to reconsider.')
signal_skysub[ok_nan] = np.nan
return signal_skysub
def sky_flattening(data, Gal_Ap_Stuff):
from astropy.modeling.polynomial import Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
from scipy.ndimage.interpolation import zoom
# 1) Read data, get pixel scale
image = data.signal_with_nans
pix_size = (data.get_pixel_scale()*u.deg).to('arcsec').value
bandname = data.bandname
# 2) If image has pixels smaller than some limit, downsample image to improve processing time
pix_size_limit = 2.0
if pix_size<pix_size_limit: downsample_factor = int(np.ceil(pix_size_limit/pix_size))
else: downsample_factor = 1
image_ds = GUS.Downsample(image, downsample_factor)
# 3) Sigma clip the downsampled image
clip_value = GUS.SigmaClip(image_ds, tolerance=0.01, sigma_thresh=3.0, median=True)
noise_value = clip_value[0]
field_value = clip_value[1]
cutoff_sigma = 2.0
cutoff = field_value + ( cutoff_sigma * noise_value )
# 4) Mask the image removing galaxy emission...
image_masked = image_ds.copy()
centre_i, centre_j, mask_semimaj_pix, mask_axial_ratio, mask_angle = Gal_Ap_Stuff
ellipse_mask = EllipseMask(image_ds, mask_semimaj_pix/downsample_factor, mask_axial_ratio, mask_angle, centre_i/downsample_factor, centre_j/downsample_factor)
image_masked[ np.where( ellipse_mask==1 ) ] = np.nan
# ...and image pixels identified as having high SNR
image_masked[ np.where( image_masked>cutoff ) ] = np.nan
# 5) Use astropy to set up 2-dimensional polynomial to the image
image_masked[ np.where( np.isnan(image_masked)==True ) ] = field_value
poly_model = Polynomial2D(degree=5)
i_coords, j_coords = np.mgrid[:image_masked.shape[0], :image_masked.shape[1]]
fitter = LevMarLSQFitter()
i_coords = i_coords.flatten()
j_coords = j_coords.flatten()
image_flattened = image_masked.flatten()
good = np.where(np.isnan(image_flattened)==False)
i_coords = i_coords[good]
j_coords = j_coords[good]
# 6) Attempt polynomial fit; if insufficient data then skip onwards
image_flattened = image_flattened[good]
try:
fit = fitter(poly_model, i_coords, j_coords, image_flattened)
except:
print(space+'Error fitting polinomial sky model. Returning unalterated image.')
return image
# 7) Create final polynomial filter (undoing downsampling using lorenzoriano GitHub script)
i_coords, j_coords = np.mgrid[:image_ds.shape[0], :image_ds.shape[1]]
poly_fit = fit(i_coords, j_coords)
poly_full = zoom(poly_fit, [ float(image.shape[0])/float(poly_fit.shape[0]), \
float(image.shape[1])/float(poly_fit.shape[1]) ], mode='nearest')
# 8) Establish background variation before application of filter
sigma_thresh = 3.0
clip_in = GUS.SigmaClip(image, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_in = image[ np.where( image<clip_in[1] ) ]
spread_in = np.mean( np.abs( bg_in - clip_in[1] ) )
# 9) How much reduction in background variation there was due to application of the filter
image_sub = image - poly_full
clip_sub = GUS.SigmaClip(image_sub, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_sub = image_sub[ np.where( image_sub < clip_sub[1] ) ]
spread_sub = np.mean( np.abs( bg_sub - clip_sub[1] ) )
spread_diff = spread_in / spread_sub
# If the filter made significant difference, apply to image and return it; otherwise, just return the unaltered map
if spread_diff>1.1:
print(space+bandname+' background is significantly variable; removing polynomial background fit.')
return image_sub, 'sub'
else:
print(space+bandname+' background is not significantly variable; leaving image unaltered.')
return image, 'unsub'
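def _demo_polynomial_sky(shape=(64, 64)):
    '''
    Minimal sketch of the flattening idea above on synthetic data: fit a
    low-order 2D polynomial to an image with a sloped background and subtract
    it. Purely illustrative; the slope and noise level are invented.
    '''
    from astropy.modeling.polynomial import Polynomial2D
    from astropy.modeling.fitting import LevMarLSQFitter
    i_grid, j_grid = np.mgrid[:shape[0], :shape[1]]
    sky = 0.01*i_grid + 0.02*j_grid
    image = sky + np.random.normal(0.0, 0.05, shape)
    fit = LevMarLSQFitter()(Polynomial2D(degree=1),
                            i_grid.flatten(), j_grid.flatten(), image.flatten())
    return image - fit(i_grid, j_grid)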
def evaluate_bkg_avg(image, Gal_Ap_Stuff):
'''
Function to evaluate the mean background in an elliptical annulus between 1.25 and 1.601 times the galaxy semimajor axis (from DustPedia photometric table).
Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
Returns: Numpy array containing the mean background per pixel.
'''
centre_x, centre_y, semimaj_pix, axial_ratio, pos_angle = Gal_Ap_Stuff
# =========
# Evaluate pixels in background annulus
bg_inner_semimaj_pix = semimaj_pix * 1.25
bg_width = (semimaj_pix * 1.601) - bg_inner_semimaj_pix
bg_calc = AnnulusSum(image, bg_inner_semimaj_pix, bg_width, axial_ratio, pos_angle, centre_x, centre_y)
bg_clip = GUS.SigmaClip(bg_calc[2], median=False, sigma_thresh=3.0)
# =========
return bg_clip[1]
def check_Dustpedia(galaxy_name, working_bands):
'''
Function to check if DustPedia photometric flux and the one measured in the same apertures with our data reduction are compatible.
Args: Galaxy name, working bands, if wanted, perform Galactic Extinction Correction
Returns: Nothing, generates a plot in Reduction folder.
'''
import os, subprocess
from astropy.io import fits, ascii
from astropy import units as u
import pandas as pd
import numpy as np
from photutils import SkyEllipticalAperture, SkyEllipticalAnnulus, aperture_photometry
from astropy.coordinates import SkyCoord
from matplotlib import pyplot as plt
subprocess.call('mkdir ../'+galaxy_name+'/Reduction/', shell = True)
path_galaxy_photometry = '../'+galaxy_name+'/Reduction/'+galaxy_name+'_photometry.dat'
# =========
# Read DustPedia Photometric Table
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
# Rearrange in order of increasing effective wavelenght
right_order = [u'name', u'ra', u'dec', u'semimaj_arcsec', u'axial_ratio', u'pos_angle', u'global_flag',
u'GALEX_FUV', u'GALEX_FUV_err', u'GALEX_FUV_flag', u'GALEX_NUV', u'GALEX_NUV_err', u'GALEX_NUV_flag',
u'SDSS_u', u'SDSS_u_err', u'SDSS_u_flag', u'SDSS_g', u'SDSS_g_err', u'SDSS_g_flag',
u'SDSS_r', u'SDSS_r_err', u'SDSS_r_flag', u'SDSS_i', u'SDSS_i_err', u'SDSS_i_flag',
u'SDSS_z', u'SDSS_z_err', u'SDSS_z_flag',
u'2MASS_J', u'2MASS_J_err', u'2MASS_J_flag', u'2MASS_H', u'2MASS_H_err', u'2MASS_H_flag',
u'2MASS_Ks', u'2MASS_Ks_err', u'2MASS_Ks_flag',
u'WISE_3.4', u'WISE_3.4_err', u'WISE_3.4_flag', u'Spitzer_3.6', u'Spitzer_3.6_err', u'Spitzer_3.6_flag',
u'Spitzer_4.5', u'Spitzer_4.5_err', u'Spitzer_4.5_flag', u'WISE_4.6', u'WISE_4.6_err', u'WISE_4.6_flag',
u'Spitzer_5.8', u'Spitzer_5.8_err', u'Spitzer_5.8_flag', u'Spitzer_8.0', u'Spitzer_8.0_err', u'Spitzer_8.0_flag',
u'WISE_12', u'WISE_12_err', u'WISE_12_flag', u'WISE_22', u'WISE_22_err', u'WISE_22_flag',
u'Spitzer_24', u'Spitzer_24_err', u'Spitzer_24_flag', u'Spitzer_70', u'Spitzer_70_err', u'Spitzer_70_flag',
u'PACS_70', u'PACS_70_err', u'PACS_70_flag', u'PACS_100', u'PACS_100_err', u'PACS_100_flag',
u'PACS_160', u'PACS_160_err', u'PACS_160_flag', u'Spitzer_160', u'Spitzer_160_err', u'Spitzer_160_flag',
u'SPIRE_250', u'SPIRE_250_err', u'SPIRE_250_flag', u'SPIRE_350', u'SPIRE_350_err', u'SPIRE_350_flag',
u'SPIRE_500', u'SPIRE_500_err', u'SPIRE_500_flag']
DustPedia_Photom = DustPedia_Photom[right_order]
gal_phot = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
# Fist, remove _flag columns
to_remove = gal_phot.columns.str.contains('flag', case=False)
gal_phot = gal_phot.loc[:,~to_remove]
# Extract ra, dec, semimaj, axial ratio and pos_angle, then remove them
ra, dec = gal_phot['ra'].values[0], gal_phot['dec'].values[0]
semimaj, axial_ratio, pos_angle = gal_phot['semimaj_arcsec'].values[0], gal_phot['axial_ratio'].values[0], gal_phot['pos_angle'].values[0]
to_remove = ['name', 'ra', 'dec', 'semimaj_arcsec', 'axial_ratio', 'pos_angle']
gal_phot = gal_phot.drop(columns=to_remove)
    # And remove empty columns
#gal_phot = gal_phot.dropna(axis='columns')
# Extract working bands fluxes and errors
gal_phot_flux = gal_phot[working_bands]
gal_phot_flux = gal_phot_flux.transpose()
working_bands_err = [t+'_err' for t in working_bands]
gal_phot_err = gal_phot[working_bands_err]
gal_phot_err = gal_phot_err.transpose()
galaxy_photometry = pd.DataFrame(np.concatenate((gal_phot_flux.values, gal_phot_err.values), axis=1))
galaxy_photometry.columns = ['Flux', 'Error']
galaxy_photometry.index = working_bands
galaxy_photometry = galaxy_photometry.fillna(0) # Fill NaN entries with zeroes
# Save
galaxy_photometry.index.names = ['Band'] # Rename the index column as "Band"
galaxy_photometry.to_csv(path_galaxy_photometry, sep='\t', index = False)
# =========
# =========
# APERTURES
# Read the apertures + radii
positions = SkyCoord(ra*u.deg, dec*u.deg, frame='icrs')
DustPedia_aperture = SkyEllipticalAperture(positions, a=semimaj*u.arcsec, b=semimaj*u.arcsec/axial_ratio, theta=pos_angle*u.deg)
DustPedia_annulus = SkyEllipticalAnnulus(positions, a_in=semimaj*u.arcsec*1.25, a_out=semimaj*u.arcsec*1.601, \
b_out=semimaj*u.arcsec/axial_ratio, theta=pos_angle*u.deg)
# =========
# =========
# Galactic Extinction Correction dictionary
GalCorr_path = '../'+galaxy_name+'/galactic_extinction_correction.txt'
if os.path.exists(GalCorr_path): pass
else: GalExtCorr(galaxy_name, working_bands, ra, dec)
GalCorrection_dictionary = dict(zip(ascii.read(GalCorr_path)['Band'].data, \
ascii.read(GalCorr_path)['Correction'].data))
# =========
# =========
# Read reduced data and perform photometry
path_fits = '../'+galaxy_name+'/_ReducedMaps/'
list_data = []
for file in os.listdir(path_fits):
if not file.endswith('.fits'): continue
elif file.startswith('In'): continue
list_data.append(GUS.FitsUtils(path_fits+file))
list_fluxes = []
for data in list_data:
# Perform photometry
phot_table = aperture_photometry(data.signal, DustPedia_aperture, wcs = data.wcs)
phot_table['aperture_sum'].info.format = '%.4g'
# Put results in a single file
phot = GUS.round_arr(phot_table['aperture_sum'].data, 2)
# Galactic extintion correction
phot *= GalCorrection_dictionary[data.bandname]
list_fluxes.append(abs(phot))
fluxes = np.array(list_fluxes)
# Sort w.r.t wavelengths
list_wvl = (t.get_wavelength() for t in list_data)
list_band = (t.bandname for t in list_data)
wvl, fluxes, bandnames = (t for t in zip(*sorted(zip(list_wvl, fluxes, list_band))))
wvl, fluxes = np.array(wvl), np.array(fluxes)[:,0]
# Save the results
ascii.write([bandnames, GUS.round_arr(wvl,2), GUS.round_arr(fluxes, 2)], '../'+galaxy_name+'/Reduction/'+galaxy_name+'_fluxes.txt', \
names = ['Band', 'Wvl', 'Fluxes'], overwrite=True)
# =========
# =========
# Re-read Dustpedia Photometry
data_CAAPR = ascii.read(path_galaxy_photometry)
fluxes_CAAPR, errors_CAAPR = data_CAAPR['Flux'].data, data_CAAPR['Error'].data
compatibility = np.abs(np.array(fluxes_CAAPR) - np.array(fluxes))/np.sqrt(np.array(errors_CAAPR)**2)
ascii.write([GUS.round_arr(compatibility,2)], '../'+galaxy_name+'/Reduction/'+galaxy_name+'_comp.txt', format='fixed_width_two_line', \
names = ['Comp'], overwrite=True)
# =========
# =========
# Plot
xmin, xmax = np.array(wvl).min(), np.array(wvl).max()
DustpediaCheckPlot = plt.figure(figsize=(15,5))
plt.subplot(2,1,1)
plt.plot(np.array(wvl), np.array(fluxes_CAAPR), \
linestyle = 'None', marker = '.', color = 'navy', label = 'CAAPR+Literature Photometry')
plt.plot(wvl, fluxes, linestyle = 'None', marker = '.', color = 'red', label = 'My Photometry')
plt.xscale('log'), plt.yscale('log')
plt.ylabel(r'Flux (Jy)')
plt.legend()
plt.subplot(2,1,2)
plt.axhline(5, color = 'r', linestyle = '-')
plt.plot(wvl, compatibility, ms = 10.0, linestyle = 'None', color = 'k', marker = '.')
for i in range(len(wvl)):
plt.text(wvl[i], 0.5, bandnames[i], rotation = 90)
plt.xscale('log'), plt.yscale('log')
plt.xlabel(r'Wavelength ($\mu$m)'), plt.ylabel(r'Compatibility $\lambda$')
plt.subplots_adjust(hspace=0.,wspace=0.)
DustpediaCheckPlot.savefig('../'+galaxy_name+'/Reduction/'+galaxy_name+'_SED.pdf', bbox_inches = 'tight')
# =========
return
def GalExtCorr(galaxy_name, list_band, ra, dec):
list_correction = []
for band in list_band:
try:
if band == 'Spitzer_3.6': band = 'IRAC1'
elif band == 'Spitzer_4.5': band = 'IRAC2'
elif band == 'Spitzer_5.8': band = 'IRAC3'
elif band == 'Spitzer_8.0': band = 'IRAC4'
elif band == 'WISE_3.4': band = 'WISE1'
elif band == 'WISE_4.6': band = 'WISE2'
correction = ChrisFuncs.ExtCorrrct(ra, dec, band, verbose = False)
list_correction.append(correction)
except: list_correction.append(1)
ascii.write([list_band, list_correction], \
'../'+galaxy_name+'/galactic_extinction_correction.txt', names = ['Band', 'Correction'])
return
##################################
# QUI COPIO BRUTALMENTE DA CLARK #
##################################
def AnnulusSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre):
'''
Function to sum all elements in an annulus centred upon the middle of the given array
Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
Returns: Numpy array containing the sum of the pixel values in the annulus, the total number of pixels counted, and an array containing the pixel values
'''
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-(rad_inner+width)])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+(rad_inner+width)])))
j_cutout_min = int(np.floor(max([0, j_centre-(rad_inner+width)])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+(rad_inner+width)])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
if np.isnan(array[int(i_centre),int(j_centre)]==False) and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)]==False):
print('SEVERE ERROR: AnnulusSum check failed.')
            import pdb
            pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj_inner = float(rad_inner)
semi_min_inner = float(semi_maj_inner) / float(axial_ratio)
semi_maj_outer = float(rad_inner) + float(width)
semi_min_outer = float(semi_maj_outer) / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within inner ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_inner = (j_trans**2 / semi_maj_inner**2) + (i_trans**2 / semi_min_inner**2 )
# Use meshgrids to create array identifying which coordinates lie within outer ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_outer = (j_trans**2 / semi_maj_outer**2) + (i_trans**2 / semi_min_outer**2 )
# Calculate flux & pixels in aperture, and store pixel values
annulus_where = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==False) )
annulus_tot = sum( array[ annulus_where ] )
annulus_count = annulus_where[0].shape[0]
annulus_pix = array[ annulus_where ]
annulus_nan = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==True) )
# Return results
return [annulus_tot, annulus_count, annulus_pix, annulus_nan]
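def _demo_annulus_sum():
    '''
    Sketch (synthetic data): sum a uniform unit image inside a circular annulus
    (axial_ratio=1, angle=0) centred on the array centre, so the returned total
    equals the pixel count. Illustrative only.
    '''
    img = np.ones((101, 101))
    annulus_tot, annulus_count, annulus_pix, annulus_nan = AnnulusSum(img, 20, 5, 1.0, 0.0, 50, 50)
    return annulus_tot, annulus_count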
def EllipseMask(array, rad, axial_ratio, angle, i_centre, j_centre):
'''
Function to return a mask identifying all pixels within an ellipse of given parameters
Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
Returns: Mask array of same dimensions as input array where pixels that lie within ellipse have value 1
'''
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad)
semi_min = float(rad) / float(axial_ratio)
if angle.dtype != 'float': angle = float(angle.value)
try:
if angle.unit == 'rad': pass
else: angle = np.radians(angle) # Convert the angle in radians
    except: angle = np.radians(angle) # Otherwise just assume it still needs converting to radians
# Create meshgrids with which to access i & j coordinates for ellipse calculations
    i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
"""Unit tests for convexified belief propagation"""
import unittest
from mrftools import *
import numpy as np
import matplotlib.pyplot as plt
class TestConvexBP(unittest.TestCase):
"""
Unit test class for convexified belief propagation
"""
def create_q_model(self):
"""Create loop model with one variable hanging off the loop (forming a Q shape)."""
mn = MarkovNet()
np.random.seed(1)
k = [4, 3, 6, 2, 5]
mn.set_unary_factor(0, np.random.randn(k[0]))
        mn.set_unary_factor(1, np.random.randn(k[1]))
import os
import sys
import time
import argparse
import pandas as pd
import numpy as np
from scipy import interp
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
plt.style.use('ggplot')
sns.set(color_codes=True)
sns.set(font_scale=4)
sns.set_style("whitegrid")
sns.set_context("paper")
sns.set(style='white', palette='muted', color_codes=True)
sns.despine(left=True)
fig_format = 'pdf'
if not os.path.exists('fig'):
os.system('mkdir fig')
##########
# figure 1
##########
title_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
filename_dataset = 'snap_data_w_annot_1000_seq_reg.txt'
dataset = pd.read_csv(filename_dataset, sep='\t')
d1 = dataset[dataset['label']==1]
d0 = dataset[dataset['label']==0]
# AFR, AMR, ASN, EUR
for i, ft in enumerate(['AFR', 'AMR', 'ASN', 'EUR']):
f = plt.figure()
x_range = np.linspace(0.0, 1.0, 11)
ft_ll1 = np.histogram(d1[ft], x_range, density=True)[0]
ft_ll0 = np.histogram(d0[ft], x_range, density=True)[0]
    ft_llr = np.log(ft_ll1 / ft_ll0)
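    # (Worked example of the quantity above, with invented numbers: a bin where
    #  the label-1 density is 0.30 and the label-0 density is 0.10 gets a
    #  log-likelihood ratio of log(0.30/0.10) ~= 1.10.)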
import numpy as np
import torch
import torch.utils.data
import random
import copy
from pathlib import Path
from mavb.forward import simulate_type_1
from mavb.forward import simulate_type_2a
from mavb.forward import simulate_type_2b
from mavb.forward import simulate_type_2c
from mavb.forward import simulate_type_3
constants_folder = Path("constants/")
def block_pattern_to_one_hot(block_pattern, length):
block_pattern = np.array(block_pattern)
one_hot = np.zeros((block_pattern.size, length))
one_hot[np.arange(block_pattern.size), block_pattern] = 1
return one_hot
def find_nearest(array, value):
array = np.array(array)
idx = np.argmin((np.abs(array - value)), axis=0)
return idx
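# Quick sketches of the two helpers above (invented values):
#   block_pattern_to_one_hot([0, 2, 1], 3) -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]
#   find_nearest([0.0, 0.5, 1.0], 0.7)     -> 1   (index of the closest entry)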
def y_to_cond(matching, seq_len, y):
cond = np.zeros((2, seq_len))
for i in range(len(matching)-1):
start = matching[i]
end = matching[i+1]
cond[1, start] = y[i]
if end-start > 1:
cond[0, start+1:end+1] = y[i]
else:
cond[0, end] = y[i]
if i == 0:
cond[0, start] = y[i]
cond[1, start] = 0
return cond
def check_block_pattern_splitter(block_pattern, n_Rwaves, btype):
res_bp = []
res_type = []
block_pattern_res = copy.deepcopy(block_pattern)
if btype == "1" or btype == "2a":
differences = []
for i in range(len(block_pattern_res[1]) - 1):
differences.append(
abs(block_pattern_res[1][i]-block_pattern_res[1][i+1]))
if len(differences) == 0:
differences = [0]
if max(differences) <= 3:
block_sum = sum(block_pattern_res[1])
if block_sum >= n_Rwaves and block_sum - block_pattern_res[1][-1] < n_Rwaves:
res_bp.append(copy.deepcopy([block_pattern_res[1]]))
res_type.append("1") if btype == "1" else res_type.append("2a")
if btype == "2b":
differences = []
for i in range(len(block_pattern_res[1]) - 1):
differences.append(
abs(block_pattern_res[1][i]-block_pattern_res[1][i+1]))
if len(differences) == 0:
differences = [0]
if max(differences) <= 3:
block_sum = sum(block_pattern_res[1])
if block_sum >= (2 * n_Rwaves - 1) and block_sum - block_pattern_res[1][-1] <= (2 * n_Rwaves - 1):
res_bp.append(copy.deepcopy([block_pattern_res[1]]))
res_type.append("2b")
if btype == "2c":
block_sum_0 = sum(block_pattern_res[0])
block_sum_1 = len(block_pattern_res[1])
if block_sum_1 == n_Rwaves:
if block_pattern_res[1][-1] == 0:
if block_sum_0 == n_Rwaves + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1]]))
res_type.append("2c")
if block_pattern_res[1][-1] == 1:
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1]]))
res_type.append("2c")
if btype == "3":
block_sum_0 = sum(block_pattern_res[0])
block_sum_1 = len(block_pattern_res[1])
block_sum_2 = len(block_pattern_res[2])
if block_sum_2 == n_Rwaves:
if block_pattern_res[2][-1] == 0:
if block_sum_1 == n_Rwaves + sum(block_pattern_res[2]):
if block_pattern_res[1][-1] == 0:
if block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 1:
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[2][-1] == 1:
if (block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) or
block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) - 1):
if block_pattern_res[1][-1] == 0 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]):
if block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 0 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) - 1:
if block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) - 1 + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 1 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]):
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 1 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) - 1:
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) - 1 + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) - 1 + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
return res_bp, res_type
def check_block_pattern_alt(block_pattern, n_Rwaves):
res_bp = []
res_type = []
block_pattern_res = copy.deepcopy(block_pattern)
if (len(block_pattern_res[0]) != 0 and len(block_pattern_res[2]) != 0 and len(np.where(np.array(block_pattern_res[0]) == 0)[0]) == len(block_pattern_res[0]) and
len(np.where(np.array(block_pattern_res[2]) == 0)[0]) == len(block_pattern_res[2]) and
np.any(block_pattern_res[1]) and min(block_pattern_res[1]) >= 1 and max(block_pattern_res[1]) <= 7):
differences = []
for i in range(len(block_pattern_res[1]) - 1):
differences.append(
abs(block_pattern_res[1][i]-block_pattern_res[1][i+1]))
if len(differences) == 0:
differences = [0]
if max(differences) <= 3:
block_sum_0 = len(block_pattern_res[0])
block_sum_1 = sum(block_pattern_res[1])
block_sum_2 = len(block_pattern_res[2])
if (((block_sum_0 == n_Rwaves + len(block_pattern_res[1]) - 1 and block_sum_1 >= n_Rwaves) or
(block_sum_0 == n_Rwaves + len(block_pattern_res[1]) and block_sum_1 == n_Rwaves)) and
(block_sum_1 - block_pattern_res[1][-1]) < n_Rwaves and
block_sum_2 == n_Rwaves):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("1")
if (len(block_pattern_res[0]) != 0 and len(block_pattern_res[2]) != 0 and len(np.where(np.array(block_pattern_res[2]) == 0)[0]) == len(block_pattern_res[2]) and
len(np.where(np.array(block_pattern_res[0]) == 1)[0]) == len(block_pattern_res[0]) and
np.any(block_pattern_res[1]) and min(block_pattern_res[1]) >= 1 and max(block_pattern_res[1]) <= 7):
differences = []
for i in range(len(block_pattern_res[1]) - 1):
differences.append(
abs(block_pattern_res[1][i]-block_pattern_res[1][i+1]))
if len(differences) == 0:
differences = [0]
if max(differences) <= 3:
block_sum_0 = sum(block_pattern_res[0])
block_sum_1 = sum(block_pattern_res[1])
block_sum_2 = len(block_pattern_res[2])
if (((block_sum_0 == n_Rwaves + len(block_pattern_res[1]) - 1 and block_sum_1 >= n_Rwaves) or
(block_sum_0 == n_Rwaves + len(block_pattern_res[1]) and block_sum_1 == n_Rwaves)) and
(block_sum_1 - block_pattern_res[1][-1]) < n_Rwaves and
block_sum_2 == n_Rwaves):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("2a")
if (len(block_pattern_res[0]) != 0 and len(block_pattern_res[2]) != 0 and len(np.where(np.array(block_pattern_res[0]) == 0)[0]) == len(block_pattern_res[0]) and
len(np.where(np.array(block_pattern_res[2]) == 1)[0]) == len(block_pattern_res[2]) and
np.any(block_pattern_res[1]) and min(block_pattern_res[1]) >= 1 and max(block_pattern_res[1]) <= 7):
differences = []
for i in range(len(block_pattern_res[1]) - 1):
differences.append(
abs(block_pattern_res[1][i]-block_pattern_res[1][i+1]))
if len(differences) == 0:
differences = [0]
if max(differences) <= 3:
block_sum_0 = len(block_pattern_res[0])
block_sum_1 = sum(block_pattern_res[1])
block_sum_2 = sum(block_pattern_res[2])
if block_sum_2 == n_Rwaves:
if block_sum_1 == 2 * n_Rwaves - 1:
if (block_sum_0 == 2 * n_Rwaves - 1 + len(block_pattern_res[1]) or
block_sum_0 == 2 * n_Rwaves - 1 + len(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("2b")
if block_sum_1 == 2 * n_Rwaves:
if (block_sum_0 == 2 * n_Rwaves + len(block_pattern_res[1]) or
block_sum_0 == 2 * n_Rwaves + len(block_pattern_res[1]) - 1 or
block_sum_0 == 2 * n_Rwaves - 1 + len(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("2b")
if block_sum_1 > 2 * n_Rwaves:
if (block_sum_1 - block_pattern_res[1][-1] == 2 * n_Rwaves - 1):
if block_sum_0 == 2 * n_Rwaves - 1 + len(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("2b")
if block_sum_1 - block_pattern_res[1][-1] < 2 * n_Rwaves - 1:
if (block_sum_0 == 2 * n_Rwaves + len(block_pattern_res[1]) - 1 or
block_sum_0 == 2 * n_Rwaves - 1 + len(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("2b")
if (len(block_pattern_res[2]) != 0 and len(np.where(np.array(block_pattern_res[2]) == 0)[0]) == len(block_pattern_res[2]) and
np.any(block_pattern_res[0]) and len(block_pattern_res[1]) != 0 and
min(block_pattern_res[0]) >= 1 and max(block_pattern_res[0]) <= 2 and min(block_pattern_res[1]) >= 0 and max(block_pattern_res[1]) <= 1):
block_sum_0 = sum(block_pattern_res[0])
block_sum_1 = len(block_pattern_res[1])
block_sum_2 = len(block_pattern_res[2])
if block_sum_1 == n_Rwaves and block_sum_2 == n_Rwaves:
if block_pattern_res[1][-1] == 0:
if block_sum_0 == n_Rwaves + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("2c")
if block_pattern_res[1][-1] == 1:
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("2c")
if (np.any(block_pattern_res[0]) and len(block_pattern_res[1]) != 0 and len(block_pattern_res[2]) != 0 and
min(block_pattern_res[0]) >= 1 and max(block_pattern_res[0]) <= 2 and min(block_pattern_res[1]) >= 0 and max(block_pattern_res[1]) <= 1 and
min(block_pattern_res[2]) >= 0 and max(block_pattern_res[2]) <= 1):
block_sum_0 = sum(block_pattern_res[0])
block_sum_1 = len(block_pattern_res[1])
block_sum_2 = len(block_pattern_res[2])
if block_sum_2 == n_Rwaves:
if block_pattern_res[2][-1] == 0:
if block_sum_1 == n_Rwaves + sum(block_pattern_res[2]):
if block_pattern_res[1][-1] == 0:
if block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 1:
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[2][-1] == 1:
if (block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) or
block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) - 1):
if block_pattern_res[1][-1] == 0 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]):
if block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 0 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) - 1:
if block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) - 1 + sum(block_pattern_res[1]):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 1 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]):
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
if block_pattern_res[1][-1] == 1 and block_sum_1 == n_Rwaves + sum(block_pattern_res[2]) - 1:
if (block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) - 1 + sum(block_pattern_res[1]) or
block_sum_0 == n_Rwaves + sum(block_pattern_res[2]) - 1 + sum(block_pattern_res[1]) - 1):
res_bp.append(copy.deepcopy(
[block_pattern_res[0], block_pattern_res[1], block_pattern_res[2]]))
res_type.append("3")
return res_bp, res_type
def seq_to_block_pattern(x):
block_pattern = [[], [], []]
idx = 0
status_1 = [x[idx][0] + 1, 1 if x[idx][0] == 0 else x[idx][0]]
status_2 = [x[idx][1] + 1, 1 if x[idx][1] == 0 else x[idx][1]]
status_3 = [x[idx][2] + 1, 1 if x[idx][2] == 0 else x[idx][2]]
block_pattern[0].append(x[idx][0])
block_pattern[1].append(x[idx][1])
block_pattern[2].append(x[idx][2])
while True:
if idx == len(x) - 1:
break
update_1 = False
update_2 = False
update_3 = False
change_1_0 = max(0, status_1[0] - 1)
change_1_1 = max(0, status_1[1] - 1)
change_2_0 = max(0, status_2[0] -
1 if status_1[1] != 0 else status_2[0])
change_2_1 = max(0, status_2[1] -
1 if status_1[1] != 0 else status_2[1])
change_3_0 = max(0, status_3[0] -
1 if status_2[1] != 0 else status_3[0])
change_3_1 = max(0, status_3[1] -
1 if status_2[1] != 0 else status_3[1])
status_1[0] = change_1_0
status_1[1] = change_1_1
status_2[0] = change_2_0
status_2[1] = change_2_1
status_3[0] = change_3_0
status_3[1] = change_3_1
idx += 1
if status_1 == [0, 0]:
update_1 = True
if status_2 == [0, 0] and (status_1[1] != 0 or update_1):
update_2 = True
if status_3 == [0, 0] and (status_1[1] != 0 or update_1) and (status_2[1] != 0 or update_2):
update_3 = True
if update_1:
block_pattern[0].append(x[idx][0])
status_1 = [x[idx][0] + 1, 1 if x[idx][0] == 0 else x[idx][0]]
if update_2:
block_pattern[1].append(x[idx][1])
status_2 = [x[idx][1] + 1, 1 if x[idx][1] == 0 else x[idx][1]]
if update_3:
block_pattern[2].append(x[idx][2])
status_3 = [x[idx][2] + 1, 1 if x[idx][2] == 0 else x[idx][2]]
return block_pattern
def block_pattern_to_seq(block_pattern):
x = []
idx_1 = 0
idx_2 = 0
idx_3 = 0
matching = [0]
matching_counter = 0
status_1 = [block_pattern[0][idx_1] + 1, 1 if block_pattern[0]
[idx_1] == 0 else block_pattern[0][idx_1]]
status_2 = [block_pattern[1][idx_2] + 1, 1 if block_pattern[1]
[idx_2] == 0 else block_pattern[1][idx_2]]
status_3 = [block_pattern[2][idx_3] + 1, 1 if block_pattern[2]
[idx_3] == 0 else block_pattern[2][idx_3]]
while True:
x.append([block_pattern[0][idx_1], block_pattern[1]
[idx_2], block_pattern[2][idx_3]])
if idx_3 == len(block_pattern[2]) - 1:
break
matching_counter += 1
update_1 = False
update_2 = False
update_3 = False
change_1_0 = max(0, status_1[0] - 1)
change_1_1 = max(0, status_1[1] - 1)
change_2_0 = max(0, status_2[0] -
1 if status_1[1] != 0 else status_2[0])
change_2_1 = max(0, status_2[1] -
1 if status_1[1] != 0 else status_2[1])
change_3_0 = max(0, status_3[0] -
1 if status_2[1] != 0 else status_3[0])
change_3_1 = max(0, status_3[1] -
1 if status_2[1] != 0 else status_3[1])
status_1[0] = change_1_0
status_1[1] = change_1_1
status_2[0] = change_2_0
status_2[1] = change_2_1
status_3[0] = change_3_0
status_3[1] = change_3_1
if status_1 == [0, 0]:
idx_1 += 1
update_1 = True
if status_2 == [0, 0] and (status_1[1] != 0 or update_1):
idx_2 += 1
update_2 = True
if status_3 == [0, 0] and (status_1[1] != 0 or update_1) and (status_2[1] != 0 or update_2):
idx_3 += 1
update_3 = True
matching.append(matching_counter)
if update_1:
status_1 = [block_pattern[0][idx_1] + 1, 1 if block_pattern[0]
[idx_1] == 0 else block_pattern[0][idx_1]]
if update_2:
status_2 = [block_pattern[1][idx_2] + 1, 1 if block_pattern[1]
[idx_2] == 0 else block_pattern[1][idx_2]]
if update_3:
status_3 = [block_pattern[2][idx_3] + 1, 1 if block_pattern[2]
[idx_3] == 0 else block_pattern[2][idx_3]]
return x, matching
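# Round-trip sketch for the two conversions above (pattern values invented):
def _demo_block_pattern_roundtrip():
    bp = [[1, 1], [2], [0, 0]]
    seq, matching = block_pattern_to_seq(bp)
    # seq == [[1, 2, 0], [1, 2, 0], [1, 2, 0]], matching == [0, 1, 2]
    assert seq_to_block_pattern(seq) == bp
    return seq, matching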
def signals_to_bp(signals, n_Rwaves):
limit = np.where(np.array(signals[0]) == 1)[0][-1] + 1
candidate1 = []
candidate2 = []
candidate3 = []
counter = 0
sub1 = []
for i in range(limit):
if signals[0][i] == 1:
counter += 1
if i == limit - 1:
sub1.append(counter)
if signals[0][i] == 0:
sub1.append(counter)
counter = 0
if max(sub1) <= 7 and min(sub1) >= 1:
candidate1.append(sub1)
sub2 = []
for i in range(limit):
if i == limit - 1:
sub2.append(1)
break
if signals[0][i] == 1 and signals[0][i+1] == 0:
sub2.append(1)
if signals[0][i] == 1 and signals[0][i+1] == 1:
sub2.append(0)
if sub2 not in candidate1:
candidate1.append(sub2)
sub3 = copy.deepcopy(sub2)
sub3[-1] = 0
if sub3 not in candidate1:
candidate1.append(sub3)
idx_1 = np.where(np.array(signals[0]) == 1)[0]
counter = 0
sub1 = []
vary = False
for i in range(len(idx_1)):
if signals[1][idx_1[i]] == 1:
counter += 1
if i == len(idx_1) - 1:
sub1.append(counter)
vary = True
if signals[1][idx_1[i]] == 0:
sub1.append(counter)
counter = 0
if not vary:
if max(sub1) <= 7 and min(sub1) >= 1:
candidate2.append(sub1)
if vary:
if len(sub1) > 1 and max(sub1) <= 7 and min(sub1) >= 1:
low_limit = np.amax([1, sub1[-2] - 3])
up_limit = np.amin([7, sub1[-2] + 3])
valid_range = np.linspace(
low_limit, up_limit, up_limit - low_limit + 1, dtype='int16')
for val in valid_range:
if val >= sub1[-1]:
sub_alt = copy.deepcopy(sub1[:-1])
sub_alt += [val]
candidate2.append(sub_alt)
if len(sub1) == 1 and max(sub1) <= 7 and min(sub1) >= 1:
low_limit = sub1[0]
up_limit = 7
valid_range = np.linspace(
low_limit, up_limit, up_limit - low_limit + 1, dtype='int16')
for val in valid_range:
sub_alt = copy.deepcopy(sub1[:-1])
sub_alt += [val]
candidate2.append(sub_alt)
sub2 = []
alt = True
for i in range(len(idx_1)):
if i == len(idx_1) - 1 and signals[1][idx_1[i]] == 1:
sub2.append(1)
break
if i == len(idx_1) - 1 and signals[1][idx_1[i]] == 0:
alt = False
break
if signals[1][idx_1[i]] == 1 and signals[1][idx_1[i+1]] == 0:
sub2.append(1)
if signals[1][idx_1[i]] == 1 and signals[1][idx_1[i+1]] == 1:
sub2.append(0)
if sub2 not in candidate2:
candidate2.append(sub2)
if alt:
sub3 = copy.deepcopy(sub2)
sub3[-1] = 0
if sub3 not in candidate2:
candidate2.append(sub3)
idx_2 = np.where(np.array(signals[1]) == 1)[0]
sub2 = []
alt = True
for i in range(len(idx_2)):
if i == len(idx_2) - 1 and signals[2][idx_2[i]] == 1:
sub2.append(1)
break
if i == len(idx_2) - 1 and signals[2][idx_2[i]] == 0:
alt = False
break
if signals[2][idx_2[i]] == 1 and signals[2][idx_2[i+1]] == 0:
sub2.append(1)
if signals[2][idx_2[i]] == 1 and signals[2][idx_2[i+1]] == 1:
sub2.append(0)
if sub2 not in candidate3:
candidate3.append(sub2)
if alt:
sub3 = copy.deepcopy(sub2)
sub3[-1] = 0
if sub3 not in candidate3:
candidate3.append(sub3)
res = []
for i in range(len(candidate1)):
for j in range(len(candidate2)):
for k in range(len(candidate3)):
bp, bp_type = check_block_pattern_alt(
[candidate1[i], candidate2[j], candidate3[k]], n_Rwaves)
if len(bp) != 0:
res.append((bp, bp_type))
return res
def correct_bp(bp, bp_type, n_Rwaves):
bp_res = copy.deepcopy(bp)
if bp_type == "1":
if sum(bp_res[1]) > n_Rwaves:
bp_res[1][-1] -= abs(sum(bp_res[1]) - n_Rwaves)
if bp_type == "2a":
if sum(bp_res[1]) > n_Rwaves:
bp_res[1][-1] -= abs(sum(bp_res[1]) - n_Rwaves)
if bp_type == "2b":
if sum(bp_res[1]) == 2 * n_Rwaves and len(bp_res[0]) == 2 * n_Rwaves - 1 + len(bp_res[1]) - 1:
bp_res[1][-1] -= 1
return bp_res
if sum(bp_res[1]) > 2 * n_Rwaves:
if sum(bp_res[1]) - bp_res[1][-1] < 2 * n_Rwaves - 1:
if len(bp_res[0]) == 2 * n_Rwaves + len(bp_res[1]) - 1:
bp_res[1][-1] -= abs(sum(bp_res[1]) - 2 * n_Rwaves)
return bp_res
if len(bp_res[0]) == 2 * n_Rwaves - 1 + len(bp_res[1]) - 1:
bp_res[1][-1] -= abs(sum(bp_res[1]) - (2 * n_Rwaves - 1))
return bp_res
if sum(bp_res[1]) - bp_res[1][-1] == 2 * n_Rwaves - 1:
bp_res[1][-1] -= abs(sum(bp_res[1]) - 2 * n_Rwaves)
return bp_res
def bp_to_signals(bp, bp_type, n_Rwaves, fill=True):
if bp_type == "1":
lvl1 = []
for b in bp[0]:
lvl1 += [1]
lvl2 = []
for b in bp[1]:
lvl2 += [1 for i in range(b)] + [0]
lvl3 = []
for b in bp[2]:
lvl3 += [1]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == "2a":
lvl1 = []
for b in bp[0]:
lvl1 += [1 for i in range(b)] + [0]
lvl2 = []
for b in bp[1]:
lvl2 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl1) == 0)[0]
for idx_i in idx:
lvl2.insert(idx_i, 0)
lvl3 = []
for b in bp[2]:
lvl3 += [1]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == "2b":
lvl1 = []
for b in bp[0]:
lvl1 += [1]
lvl2 = []
for b in bp[1]:
lvl2 += [1 for i in range(b)] + [0]
lvl3 = []
for b in bp[2]:
lvl3 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == "2c":
lvl1 = []
for b in bp[0]:
lvl1 += [1 for i in range(b)] + [0]
lvl2 = []
for b in bp[1]:
if b == 0:
lvl2 += [1]
else:
lvl2 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl1) == 0)[0]
for idx_i in idx:
lvl2.insert(idx_i, 0)
lvl3 = []
for b in bp[2]:
lvl3 += [1]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == "3":
lvl1 = []
for b in bp[0]:
lvl1 += [1 for i in range(b)] + [0]
lvl2 = []
for b in bp[1]:
if b == 0:
lvl2 += [1]
else:
lvl2 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl1) == 0)[0]
for idx_i in idx:
lvl2.insert(idx_i, 0)
lvl3 = []
for b in bp[2]:
if b == 0:
lvl3 += [1]
else:
lvl3 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if fill:
lvl1 += [0 for i in range(200 - len(lvl1))]
lvl2 += [0 for i in range(200 - len(lvl2))]
lvl3 += [0 for i in range(200 - len(lvl3))]
else:
lvl1 = lvl1[:np.where(np.array(lvl1) == 1)[0][-1] + 1]
lvl2 = lvl2[:len(lvl1)]
lvl3 = lvl3[:len(lvl1)]
return [lvl1, lvl2, lvl3]
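# Worked example (added for illustration, values are hypothetical): for bp_type "1" the
# conversion above turns a block pattern into the three conduction-level signals like so:
#   bp = [[0, 0, 0, 0, 0], [2, 1], [0, 0, 0]]   # n_Rwaves = 3
#   lvl1 = [1, 1, 1, 1, 1]                      # one 1 per entry of bp[0]
#   lvl2 = [1, 1, 0, 1, 0]                      # each bp[1] entry b becomes b ones followed by a 0
#   lvl3 = [1, 1, 0, 1, 0]                      # bp[2] ones with 0s inserted wherever lvl2 is 0
# so bp_to_signals(bp, "1", 3, fill=False) returns [lvl1, lvl2, lvl3]; with fill=True each
# level is zero-padded to length 200.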
def generate_block_pattern_alt(block_type, n_Rwaves):
if block_type == "1":
block_pattern_2 = [np.random.randint(1, 8)]
block_pattern_2_sum = block_pattern_2[0]
while block_pattern_2_sum < n_Rwaves:
current_block_ratio = block_pattern_2[-1]
block_pattern_2.append(np.random.randint(
np.amax([1, current_block_ratio-3]),
np.amin([8, current_block_ratio+4])))
block_pattern_2_sum += block_pattern_2[-1]
if block_pattern_2_sum == n_Rwaves:
block_pattern_1 = random.choice([[0 for x in range(n_Rwaves + len(block_pattern_2))],
[0 for x in range(n_Rwaves + len(block_pattern_2) - 1)]])
if block_pattern_2_sum > n_Rwaves:
block_pattern_1 = [0 for x in range(
n_Rwaves + len(block_pattern_2) - 1)]
block_pattern_3 = [0 for x in range(n_Rwaves)]
return [block_pattern_1, block_pattern_2, block_pattern_3]
if block_type == "2a":
block_pattern_2 = [np.random.randint(1, 8)]
block_pattern_2_sum = block_pattern_2[0]
while block_pattern_2_sum < n_Rwaves:
current_block_ratio = block_pattern_2[-1]
block_pattern_2.append(np.random.randint(
np.amax([1, current_block_ratio-3]),
np.amin([8, current_block_ratio+4])))
block_pattern_2_sum += block_pattern_2[-1]
if block_pattern_2_sum == n_Rwaves:
block_pattern_1 = random.choice([[1 for x in range(n_Rwaves + len(block_pattern_2))],
[1 for x in range(n_Rwaves + len(block_pattern_2) - 1)]])
if block_pattern_2_sum > n_Rwaves:
block_pattern_1 = [1 for x in range(
n_Rwaves + len(block_pattern_2) - 1)]
block_pattern_3 = [0 for x in range(n_Rwaves)]
return [block_pattern_1, block_pattern_2, block_pattern_3]
if block_type == "2b":
while True:
bp_2_choice = random.choice([1, 2])
block_pattern_2 = [np.random.randint(1, 8)]
block_pattern_2_sum = block_pattern_2[0]
while block_pattern_2_sum < 2 * n_Rwaves - 1:
current_block_ratio = block_pattern_2[-1]
block_pattern_2.append(np.random.randint(
np.amax([1, current_block_ratio-3]),
np.amin([8, current_block_ratio+4])))
block_pattern_2_sum += block_pattern_2[-1]
if block_pattern_2_sum == 2 * n_Rwaves - 1 and bp_2_choice == 1:
block_pattern_1 = random.choice([[0 for x in range(2 * n_Rwaves - 1 + len(block_pattern_2))],
[0 for x in range(2 * n_Rwaves - 1 + len(block_pattern_2) - 1)]])
break
if block_pattern_2_sum == 2 * n_Rwaves and bp_2_choice == 2:
block_pattern_1 = random.choice([[0 for x in range(2 * n_Rwaves + len(block_pattern_2))],
[0 for x in range(
2 * n_Rwaves + len(block_pattern_2) - 1)],
[0 for x in range(2 * n_Rwaves - 1 + len(block_pattern_2) - 1)]])
break
if block_pattern_2_sum > 2 * n_Rwaves and bp_2_choice == 2:
if block_pattern_2_sum - block_pattern_2[-1] == 2 * n_Rwaves - 1:
block_pattern_1 = [0 for x in range(
2 * n_Rwaves - 1 + len(block_pattern_2))]
break
if block_pattern_2_sum - block_pattern_2[-1] < 2 * n_Rwaves - 1:
block_pattern_1 = random.choice([[0 for x in range(2 * n_Rwaves - 1 + len(block_pattern_2) - 1)],
[0 for x in range(2 * n_Rwaves + len(block_pattern_2) - 1)]])
break
block_pattern_3 = [1 for x in range(n_Rwaves)]
return [block_pattern_1, block_pattern_2, block_pattern_3]
if block_type == "2c":
while True:
block_pattern_2 = [np.random.randint(0, 2)]
block_pattern_2_sum = 1
while block_pattern_2_sum != n_Rwaves:
block_pattern_2.append(np.random.randint(0, 2))
block_pattern_2_sum += 1
bp_choice_1 = random.choice([1, 2])
block_pattern_1 = [np.random.randint(1, 3)]
block_pattern_1_sum = block_pattern_1[0]
repeat = False
while True:
if block_pattern_2[-1] == 0:
if block_pattern_1_sum == n_Rwaves + sum(block_pattern_2):
break
if block_pattern_1_sum > n_Rwaves + sum(block_pattern_2):
repeat = True
break
if block_pattern_2[-1] == 1:
if block_pattern_1_sum == n_Rwaves + sum(block_pattern_2) and bp_choice_1 == 1:
break
if block_pattern_1_sum > n_Rwaves + sum(block_pattern_2) and bp_choice_1 == 1:
repeat = True
break
if block_pattern_1_sum == n_Rwaves + sum(block_pattern_2) - 1 and bp_choice_1 == 2:
break
if block_pattern_1_sum > n_Rwaves + sum(block_pattern_2) - 1 and bp_choice_1 == 2:
repeat = True
break
block_pattern_1.append(np.random.randint(1, 3))
block_pattern_1_sum += block_pattern_1[-1]
if not repeat:
break
block_pattern_3 = [0 for x in range(n_Rwaves)]
return [block_pattern_1, block_pattern_2, block_pattern_3]
if block_type == "3":
while True:
block_pattern_3 = [np.random.randint(0, 2)]
block_pattern_3_sum = 1
while block_pattern_3_sum != n_Rwaves:
block_pattern_3.append(np.random.randint(0, 2))
block_pattern_3_sum += 1
bp_choice_2 = random.choice([1, 2])
block_pattern_2 = [np.random.randint(0, 2)]
block_pattern_2_sum = 1
while True:
if block_pattern_3[-1] == 0:
if block_pattern_2_sum == n_Rwaves + sum(block_pattern_3):
break
if block_pattern_3[-1] == 1:
if block_pattern_2_sum == n_Rwaves + sum(block_pattern_3) and bp_choice_2 == 1:
break
if block_pattern_2_sum == n_Rwaves + sum(block_pattern_3) - 1 and bp_choice_2 == 2:
break
block_pattern_2.append(np.random.randint(0, 2))
block_pattern_2_sum += 1
bp_choice_1 = random.choice([1, 2])
block_pattern_1 = [np.random.randint(1, 3)]
block_pattern_1_sum = block_pattern_1[0]
repeat = False
while True:
if block_pattern_2[-1] == 0:
if block_pattern_1_sum == block_pattern_2_sum + sum(block_pattern_2):
break
if block_pattern_1_sum > block_pattern_2_sum + sum(block_pattern_2):
repeat = True
break
if block_pattern_2[-1] == 1:
if block_pattern_1_sum == block_pattern_2_sum + sum(block_pattern_2) and bp_choice_1 == 1:
break
if block_pattern_1_sum > block_pattern_2_sum + sum(block_pattern_2) and bp_choice_1 == 1:
repeat = True
break
if block_pattern_1_sum == block_pattern_2_sum + sum(block_pattern_2) - 1 and bp_choice_1 == 2:
break
if block_pattern_1_sum > block_pattern_2_sum + sum(block_pattern_2) - 1 and bp_choice_1 == 2:
repeat = True
break
block_pattern_1.append(np.random.randint(1, 3))
block_pattern_1_sum += block_pattern_1[-1]
if not repeat:
break
return [block_pattern_1, block_pattern_2, block_pattern_3]
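# Usage sketch (illustrative, not part of the original pipeline): sample a block pattern,
# adjust it to the requested number of R waves and expand it into level signals. Assumes the
# helpers defined above; the block type and n_Rwaves below are arbitrary choices.
#   n_Rwaves = 8
#   bp = generate_block_pattern_alt("2a", n_Rwaves)
#   bp = correct_bp(bp, "2a", n_Rwaves)
#   lvl1, lvl2, lvl3 = bp_to_signals(bp, "2a", n_Rwaves, fill=False)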
def get_signals_sequence_batch(batch_size, test=False, btype=0):
x = []
y = []
for i in range(batch_size):
n_Rwaves = np.random.randint(6, 26)
atrial_cycle_length = np.random.randint(188, 401)
conduction_constant = np.random.randint(1, atrial_cycle_length + 1)
block_type = random.choice(["1", "2a", "2b", "2c", "3"])
#block_type = btype
block_pattern = generate_block_pattern_alt(block_type, n_Rwaves)
block_pattern_extra = copy.deepcopy(block_pattern)
if block_type == "1":
intervals = simulate_type_1(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2a":
intervals = simulate_type_2a(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2b":
intervals = simulate_type_2b(block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "2c":
intervals = simulate_type_2c(block_pattern[0], block_pattern[1], atrial_cycle_length,
conduction_constant)
if block_type == "3":
intervals = simulate_type_3(block_pattern[0], block_pattern[1], block_pattern[2],
atrial_cycle_length, conduction_constant)
block_pattern = correct_bp(block_pattern, block_type, n_Rwaves)
signals = np.array(bp_to_signals(
block_pattern, block_type, n_Rwaves, fill=False))
x_i = np.zeros(194)
x_i[signals.shape[1]-6] = 1
x.append(x_i)
y_i = np.zeros(24)
y_i[:(n_Rwaves-1)] = intervals[:(n_Rwaves-1)]
y.append(y_i)
x = torch.tensor(x, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32)
x += 0.1 * torch.randn(x.shape[0], x.shape[1])
y_mean = np.loadtxt(constants_folder / 'y_mean_est.csv')
y_std = np.loadtxt(constants_folder / 'y_std_est.csv')
if not test:
y = (y - y_mean) / y_std
    # cast back to plain float32 tensors (normalising against the float64 numpy stats promotes y)
    x = torch.as_tensor(x, dtype=torch.float32)
    y = torch.as_tensor(y, dtype=torch.float32)
assert(not np.any(np.isnan(np.array(x))))
assert(not np.any(np.isnan(np.array(y))))
if test:
return y, intervals, n_Rwaves, atrial_cycle_length, conduction_constant, block_pattern_extra, block_type
else:
return x, y
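# Usage sketch (illustrative): draw a normalised training batch, or a single raw sample with
# its generating parameters for evaluation. Assumes simulate_type_* and constants_folder are
# defined earlier in this module.
#   x_train, y_train = get_signals_sequence_batch(64)
#   y_raw, intervals, n_Rwaves, acl, cc, bp, btype = get_signals_sequence_batch(1, test=True)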
def get_signals_matching_batch(batch_size):
x = []
y = []
for i in range(batch_size):
n_Rwaves = np.random.randint(6, 26)
        atrial_cycle_length = np.random.randint(188, 401)
from __future__ import division
from numpy import argsort, array, cross, dot, float64, mean
from numpy.linalg import svd
from pynurbs.geometry.checker import CheckGeom
from pynurbs.geometry.intersector import IntersectGeom
from pynurbs.geometry.line import Line
from pynurbs.geometry.methods.geom_utils import angle_between_vecs
from pynurbs.geometry.methods.misc import is_local_domain
from pynurbs.geometry.plane import Plane
from pynurbs.geometry.point import Point
from pynurbs.geometry.projector import ProjectGeom
from pynurbs.geometry.system import System
from pynurbs.geometry.vector import Vector
def vector_by_points(p0, p1, p2=None):
"""
Create a vector defined by two or three points.
:param p0: Origin of plane.
:type p0: :class:`.Point` or array_like
:param p1: Point defining vector *p1* - *p0*.
:type p1: :class:`.Point` or array_like
:param p2: Point defining vector *p2* - *p0*.
:type p2: :class:`.Point` or array_like
:return: A vector from *p0* to *p1* if only two points are provided,
or a vector defined by the cross product of *p10* x *p20* if all three
points are provided.
:rtype: :class:`.Vector`
"""
if not isinstance(p0, Point):
p0 = Point(p0)
if not isinstance(p1, Point):
p1 = Point(p1)
# Cross product if three points are provided.
if p2 is not None:
if not isinstance(p2, Point):
p2 = Point(p2)
v10 = p1.xyz - p0.xyz
v20 = p2.xyz - p0.xyz
vn = cross(v10, v20)
return Vector(vn, p0)
# Straight vector if two points are provided.
return Vector(p1.xyz - p0.xyz, p0)
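# Usage sketch (illustrative, not part of the original module):
#   v10 = vector_by_points((0., 0., 0.), (1., 0., 0.))                   # Vector p1 - p0 anchored at p0
#   vn = vector_by_points((0., 0., 0.), (1., 0., 0.), (0., 1., 0.))      # cross product (p1-p0) x (p2-p0)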
def vector_by_axis(axis, origin):
"""
Create a vector along the specified axis.
:param str axis: Axis ('x', 'y', or 'z').
:param array_like origin: Origin of vector.
:return: Vector along specified axis.
:rtype: :class:`.Vector`
"""
if not isinstance(origin, Point):
origin = Point(origin)
if not isinstance(axis, str):
return None
if axis.lower() not in ['x', 'y', 'z']:
return None
if axis.lower() in ['x']:
return Vector([1., 0., 0.], origin)
if axis.lower() in ['y']:
return Vector([0., 1., 0.], origin)
if axis.lower() in ['z']:
return Vector([0., 0., 1.], origin)
def line_by_points(p0, p1):
"""
Create a line defined by two points.
:param p0: Origin of plane.
:type p0: :class:`.Point` or array_like
:param p1: Point defining vector *p1* - *p0*.
:type p1: :class:`.Point` or array_like
    :return: A line defined by an origin at *p0* and a vector *p10*.
:rtype: :class:`.Line`
"""
if not isinstance(p0, Point):
p0 = Point(p0)
v = vector_by_points(p0, p1)
return Line(p0, v)
def plane_by_points(p0, p1, p2):
"""
Create a plane defined by three points.
:param p0: Origin of plane.
:type p0: :class:`.Point` or array_like
:param p1: Point defining vector *p1* - *p0*.
:type p1: :class:`.Point` or array_like
:param p2: Point defining vector *p2* - *p0*.
:type p2: :class:`.Point` or array_like
:return: A plane with a normal vector defined by the cross product of
*p10* x *p20* and an x-axis oriented towards *p1*
:rtype: :class:`.Plane`
"""
if not isinstance(p0, Point):
p0 = Point(p0)
if not isinstance(p1, Point):
p1 = Point(p1)
if not isinstance(p2, Point):
p2 = Point(p2)
v10 = p1.xyz - p0.xyz
v20 = p2.xyz - p0.xyz
vn = cross(v10, v20)
vv = cross(vn, v10)
vu = cross(vv, vn)
vu = Vector(vu, p0)
vv = Vector(vv, p0)
vn = Vector(vn, p0)
return Plane(p0, vn, vu, vv)
def fit_plane(pnts, tol=None):
"""
Fit a plane to a scattered set of points.
:param pnts: Points to fit (at least 3 points are required).
:type pnts: list of :class:`.Point` instances or array_like
:param float tol: Tolerance for checking the fit. If *None* is
provided then the plane will be fit to the points. If a float is
provided then the plane will not be created if the distance from
any points is greater than *tol*.
:return: Plane that best fits data.
:rtype: :class:`.Plane`
"""
# Convert points to array.
pnts = array(pnts, dtype=float64)
if pnts.shape[0] < 3:
return None
# Calculate average to use as the plane origin.
p0 = mean(pnts, axis=0)
# Move points to centroid.
pc = pnts - p0
# Use SVD.
u, s, v = svd(pc, False)
# Check that points are not on a line
if abs(s[2] - s[1]) <= 1.0e-12:
return None
# Find min and max values that define normal vector and major and minor
# axes of the plane.
indx = argsort(s)
vn = v[indx[0]]
vv = v[indx[1]]
vu = v[indx[2]]
# Create plane.
p0 = Point(p0)
vu = Vector(vu, p0)
vv = Vector(vv, p0)
vn = Vector(vn, p0)
plane = Plane(p0, vn, vu, vv)
if tol is None:
return plane
# Check distance to each point.
for pi in pnts:
if abs(plane.dist2pnt(pi)) > tol:
return None
return plane
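# Usage sketch (illustrative): fit a plane through noisy, roughly coplanar points. The SVD
# above takes the direction of the smallest singular value as the plane normal, so for the
# hypothetical points below (scattered about z = 0) the fitted normal is close to (0, 0, 1).
#   pts = [(0., 0., 0.01), (1., 0., -0.02), (0., 1., 0.), (1., 1., 0.01), (0.5, 0.5, -0.01)]
#   plane = fit_plane(pts)            # best-fit Plane, or None for degenerate input
#   plane = fit_plane(pts, tol=0.1)   # additionally rejects fits farther than tol from any point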
def plane_by_axes(p0, axes, sys=None):
"""
Create a plane defined by an origin and standard axes.
:param p0: Origin of plane.
:type p0: :class:`.Point` or array_like
:param axes: Standard axes, one of 'xy', 'xz', or 'yz'.
:param sys: Reference system for axes.
:type sys: :class:`.System`
:return: Plane oriented by axes.
:rtype: :class:`.Plane`
"""
if not isinstance(axes, str):
return None
if axes.lower() not in ['xy', 'yx', 'xz', 'zx', 'yz', 'zy']:
return None
if not isinstance(p0, Point):
p0 = Point(p0)
vx = array([1., 0., 0.], dtype=float64)
vy = array([0., 1., 0.], dtype=float64)
vz = array([0., 0., 1.], dtype=float64)
if CheckGeom.is_system(sys):
vx = sys.vx.ijk
vy = sys.vy.ijk
vz = sys.vz.ijk
if axes.lower() in ['xy', 'yx']:
p1 = p0.xyz + vx
p2 = p0.xyz + vy
return plane_by_points(p0, p1, p2)
if axes.lower() in ['xz', 'zx']:
p1 = p0.xyz + vz
p2 = p0.xyz + vx
return plane_by_points(p0, p1, p2)
if axes.lower() in ['yz', 'zy']:
p1 = p0.xyz + vy
p2 = p0.xyz + vz
return plane_by_points(p0, p1, p2)
def planes_by_offset(plane, offset, n):
"""
Create planes by offsetting an original plane.
:param plane: Plane to offset.
:type plane: :class:`.Plane`
:param float offset: Distance to offset.
:param int n: Number of planes to generate.
:return: List of planes offset from original.
:rtype: list
"""
if not isinstance(plane, Plane):
return None
if n <= 0:
n = 1
planes = []
for i in range(1, n + 1):
planes.append(plane.offset(w=i * offset))
return planes
def plane_by_normal(p0, vn):
"""
Create a plane by an origin and normal vector.
:param p0: Origin of plane.
:type p0: :class:`.Point` or array_like
:param vn: Normal vector of plane.
:type vn: :class:`.Vector` or array_like
:return: Plane with given origin and normal vector.
:rtype: :class:`.Plane`
"""
if not isinstance(p0, Point):
p0 = Point(p0)
if not isinstance(vn, Vector):
vn = Vector(vn, p0)
# Try x-axis.
vu = cross([1., 0., 0.], vn.vxyz)
if dot(vu, vu) > 0.:
vv = Vector(cross(vu, vn.vxyz), p0)
vu = Vector(vu, p0)
return Plane(p0, vn, vu, vv)
# Try y-axis.
vu = cross([0., 1., 0.], vn.vxyz)
if dot(vu, vu) > 0.:
vv = Vector(cross(vu, vn.vxyz), p0)
vu = Vector(vu, p0)
return Plane(p0, vn, vu, vv)
# Try z-axis.
vu = cross([0., 0., 1.], vn.vxyz)
if dot(vu, vu) > 0.:
        vv = Vector(cross(vu, vn.vxyz), p0)
        vu = Vector(vu, p0)
        return Plane(p0, vn, vu, vv)
# module for processing networkx graphs in various ways
import pandas as pd
import numpy as np
import csv
import gzip
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import pymaid
from joblib import Parallel, delayed
import networkx as nx
import networkx.utils as nxu
class Analyze_Nx_G():
def __init__(self, edges, graph_type='directed', split_pairs=False, graph=None, select_neurons=[]):
if(len(select_neurons)>0):
if(split_pairs==False):
indices_us = [True if x in select_neurons else False for x in edges.upstream_pair_id.to_list()]
indices_ds = [True if x in select_neurons else False for x in edges.downstream_pair_id.to_list()]
edges = edges.loc[np.logical_and(indices_us, indices_ds), :]
if(split_pairs):
indices_us = [True if x in select_neurons else False for x in edges.upstream_skid.to_list()]
indices_ds = [True if x in select_neurons else False for x in edges.downstream_skid.to_list()]
edges = edges.loc[np.logical_and(indices_us, indices_ds), :]
if(graph==None):
self.edges = edges
self.G = self.generate_graph(graph_type, split_pairs=split_pairs)
if(graph!=None):
self.G = graph
self.edges = graph.edges
def generate_graph(self, graph_type, split_pairs=False):
edges = self.edges
if(split_pairs==False):
if(graph_type=='directed'):
graph = nx.DiGraph()
for i in range(len(edges)):
graph.add_edge(edges.iloc[i].upstream_pair_id, edges.iloc[i].downstream_pair_id,
weight = np.mean([edges.iloc[i].left, edges.iloc[i].right]),
edge_type = edges.iloc[i].type)
if(graph_type=='undirected'):
graph = nx.Graph()
for i in range(len(edges)):
if(edges.iloc[i].upstream_pair_id == edges.iloc[i].downstream_pair_id): # remove self-edges
continue
if(edges.iloc[i].upstream_pair_id != edges.iloc[i].downstream_pair_id):
if((edges.iloc[i].upstream_pair_id, edges.iloc[i].downstream_pair_id) not in graph.edges):
graph.add_edge(edges.iloc[i].upstream_pair_id, edges.iloc[i].downstream_pair_id)
if(split_pairs):
if(graph_type=='directed'):
graph = nx.DiGraph()
for i in range(len(edges)):
graph.add_edge(edges.iloc[i].upstream_skid, edges.iloc[i].downstream_skid,
weight = edges.iloc[i].edge_weight,
edge_type = edges.iloc[i].type)
if(graph_type=='undirected'):
graph = nx.Graph()
for i in range(len(edges)):
if(edges.iloc[i].upstream_skid == edges.iloc[i].downstream_skid): # remove self-edges
continue
if(edges.iloc[i].upstream_skid != edges.iloc[i].downstream_skid):
if((edges.iloc[i].upstream_skid, edges.iloc[i].downstream_skid) not in graph.edges):
graph.add_edge(edges.iloc[i].upstream_skid, edges.iloc[i].downstream_skid)
return(graph)
# comprehensive list of in/out degrees and identification of hubs if desired
def get_node_degrees(self, hub_threshold=None):
nodes = list(self.G.nodes)
in_degree = [self.G.in_degree(node) for node in nodes]
out_degree = [self.G.out_degree(node) for node in nodes]
neurons = pd.DataFrame(zip(in_degree, out_degree), index=nodes, columns=['in_degree', 'out_degree'])
if(hub_threshold!=None):
in_hub = [1 if in_d>=hub_threshold else 0 for in_d in in_degree]
out_hub = [1 if out_d>=hub_threshold else 0 for out_d in out_degree]
in_out_hub = [1 if ((degree[0]>=hub_threshold) & (degree[1]>=hub_threshold)) else 0 for degree in zip(in_degree, out_degree)]
neurons = pd.DataFrame(zip(in_degree, out_degree, in_hub, out_hub, in_out_hub), index=nodes, columns=['in_degree', 'out_degree', 'in_hub', 'out_hub', 'in_out_hub'])
hub_type=[]
for index in range(0, len(neurons)):
if((neurons.iloc[index, :].in_hub==1) & (neurons.iloc[index, :].out_hub==0)):
hub_type.append('in_hub')
if((neurons.iloc[index, :].out_hub==1) & (neurons.iloc[index, :].in_hub==0)):
hub_type.append('out_hub')
if(neurons.iloc[index, :].in_out_hub==1):
hub_type.append('in_out_hub')
if((neurons.iloc[index, :].out_hub==0) & (neurons.iloc[index, :].in_hub==0)):
hub_type.append('non-hub')
neurons['type']=hub_type
return(neurons)
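    # Usage sketch (illustrative, not part of the original class): build the pair-wise graph
    # and tabulate hub neurons. Assumes an `edges` DataFrame with the upstream_pair_id /
    # downstream_pair_id / left / right / type columns used above; the threshold is arbitrary.
    #   graph_obj = Analyze_Nx_G(edges, graph_type='directed')
    #   degrees = graph_obj.get_node_degrees(hub_threshold=20)
    #   hub_ids = degrees[degrees['type'] != 'non-hub'].index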
# modified some of the functions from networkx to generate multi-hop self loop paths
def empty_generator(self):
""" Return a generator with no members """
yield from ()
# modified some of the functions from networkx to generate multi-hop self loop paths
def mod_all_simple_paths(self, source, target, cutoff=None):
if source not in self.G:
raise nx.NodeNotFound(f"source node {source} not in graph")
if target in self.G:
targets = {target}
else:
try:
targets = set(target)
except TypeError as e:
raise nx.NodeNotFound(f"target node {target} not in graph") from e
if cutoff is None:
cutoff = len(self.G) - 1
if cutoff < 1:
return self.empty_generator()
else:
return self._mod_all_simple_paths_graph(source, targets, cutoff)
# modified some of the functions from networkx to generate multi-hop self loop paths
def _mod_all_simple_paths_graph(self, source, targets, cutoff):
visited = dict.fromkeys([str(source)]) # convert to str so it's ignored
stack = [iter(self.G[source])]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.popitem()
elif len(visited) < cutoff:
if (child in visited):
continue
if child in targets:
yield list(visited) + [child]
visited[child] = None
if targets - set(visited.keys()): # expand stack until find all targets
stack.append(iter(self.G[child]))
else:
visited.popitem() # maybe other ways to child
else: # len(visited) == cutoff:
for target in (targets & (set(children) | {child})) - set(visited.keys()):
yield list(visited) + [target]
stack.pop()
visited.popitem()
def all_simple_self_loop_paths(self, source, cutoff):
path = list(self.mod_all_simple_paths(source=source, target=source, cutoff=cutoff))
for i in range(len(path)):
path[i][0] = int(path[i][0]) # convert source str to int
return(path)
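    # Usage sketch (illustrative): enumerate fixed-length loops that start and end at one node.
    # The source is temporarily keyed as a string so the simple-path search can revisit it.
    # `some_skid` is a placeholder for any node id present in the graph.
    #   loops = graph_obj.all_simple_self_loop_paths(source=some_skid, cutoff=3)
    #   loops = [p for p in loops if len(p) == 4]   # source, two intermediates, back to source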
def partner_loop_probability(self, pairs, length):
# requires Analyze_Nx_G(..., split_pairs=True)
if(length<2):
print('length must be 2 or greater!')
return
partner_loop = []
nonpartner_loop = []
all_paths = []
for i in pairs.index:
leftid = pairs.loc[i].leftid
rightid = pairs.loc[i].rightid
paths = self.all_simple_self_loop_paths(source = leftid, cutoff=length)
paths = [path for path in paths if len(path)==(length+1)]
all_paths.append(paths)
# when loops exist
if(len(paths)>0):
loop_partners = [path[1:length] for path in paths] # collect all partners that mediate loops
if(type(loop_partners[0])==list): loop_partners = [x for sublist in loop_partners for x in sublist]
                loop_partners = list(np.unique(loop_partners))
""" Contains basic Batch classes """
import os
import traceback
import threading
import dill
try:
import blosc
except ImportError:
pass
import numpy as np
try:
import pandas as pd
except ImportError:
pass
try:
import feather
except ImportError:
pass
try:
import dask.dataframe as dd
except ImportError:
pass
from .dsindex import DatasetIndex, FilesIndex
from .decorators import action, inbatch_parallel, any_action_failed
from .dataset import Dataset
from .batch_base import BaseBatch
from .components import MetaComponentsTuple
class Batch(BaseBatch):
""" The core Batch class """
_item_class = None
components = None
def __init__(self, index, preloaded=None, *args, **kwargs):
if self.components is not None and not isinstance(self.components, tuple):
raise TypeError("components should be a tuple of strings with components names")
super().__init__(index, *args, **kwargs)
self._preloaded_lock = threading.Lock()
self._preloaded = preloaded
self._local = None
self._pipeline = None
@property
def pipeline(self):
""": Pipeline - a pipeline the batch is being used in """
if self._local is not None and hasattr(self._local, 'pipeline'):
return self._local.pipeline
else:
return self._pipeline
@pipeline.setter
def pipeline(self, val):
""" Store pipeline in a thread-local storage """
if val is None:
self._local = None
else:
if self._local is None:
self._local = threading.local()
self._local.pipeline = val
self._pipeline = val
def deepcopy(self):
""" Return a deep copy of the batch.
Constructs a new ``Batch`` instance and then recursively copies all
the objects found in the original batch, except the ``pipeline``,
which remains unchanged.
Returns
-------
Batch
"""
pipeline = self.pipeline
self.pipeline = None
dump_batch = dill.dumps(self)
self.pipeline = pipeline
restored_batch = dill.loads(dump_batch)
restored_batch.pipeline = pipeline
return restored_batch
@classmethod
def from_data(cls, index, data):
""" Create batch from a given dataset """
# this is roughly equivalent to self.data = data
if index is None:
index = np.arange(len(data))
return cls(index, preloaded=data)
@classmethod
def from_batch(cls, batch):
""" Create batch from another batch """
return cls(batch.index, preloaded=batch._data) # pylint: disable=protected-access
@classmethod
def merge(cls, batches, batch_size=None):
""" Merge several batches to form a new batch of a given size
Parameters
----------
batches : tuple of batches
batch_size : int or None
if `None`, just merge all batches into one batch (the rest will be `None`),
if `int`, then make one batch of `batch_size` and a batch with the rest of data.
Returns
-------
batch, rest : tuple of two batches
"""
def _make_index(data):
return DatasetIndex(np.arange(data.shape[0])) if data is not None and data.shape[0] > 0 else None
def _make_batch(data):
index = _make_index(data[0])
return cls(index, preloaded=tuple(data)) if index is not None else None
if batch_size is None:
break_point = len(batches)
last_batch_len = len(batches[-1])
else:
break_point = -1
last_batch_len = 0
cur_size = 0
for i, b in enumerate(batches):
cur_batch_len = len(b)
if cur_size + cur_batch_len >= batch_size:
break_point = i
last_batch_len = batch_size - cur_size
break
else:
cur_size += cur_batch_len
last_batch_len = cur_batch_len
components = batches[0].components or (None,)
new_data = list(None for _ in components)
rest_data = list(None for _ in components)
for i, comp in enumerate(components):
if batch_size is None:
new_comp = [b.get(component=comp) for b in batches[:break_point]]
else:
b = batches[break_point]
last_batch_len_ = b.get_pos(None, comp, b.indices[last_batch_len - 1])
new_comp = [b.get(component=comp) for b in batches[:break_point]] + \
[batches[break_point].get(component=comp)[:last_batch_len_ + 1]]
new_data[i] = cls.merge_component(comp, new_comp)
if batch_size is not None:
rest_comp = [batches[break_point].get(component=comp)[last_batch_len_ + 1:]] + \
[b.get(component=comp) for b in batches[break_point + 1:]]
rest_data[i] = cls.merge_component(comp, rest_comp)
new_batch = _make_batch(new_data)
rest_batch = _make_batch(rest_data)
return new_batch, rest_batch
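    # Usage sketch (illustrative; `MyBatch`, `batch_a` and `batch_b` are placeholders for a
    # Batch subclass and two of its instances): re-batch items coming from several smaller
    # batches. With batch_size=48 the first 48 items (component-wise) form the new batch and
    # the remainder, if any, is returned as `rest`; with batch_size=None everything is merged
    # and `rest` is None.
    #   new_batch, rest = MyBatch.merge((batch_a, batch_b), batch_size=48)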
@classmethod
def merge_component(cls, component=None, data=None):
""" Merge the same component data from several batches """
_ = component
if isinstance(data[0], np.ndarray):
return np.concatenate(data)
else:
raise TypeError("Unknown data type", type(data[0]))
def as_dataset(self, dataset=None):
""" Makes a new dataset from batch data
Parameters
----------
dataset: could be a dataset or a Dataset class
Returns
-------
an instance of a class specified by `dataset` arg, preloaded with this batch data
"""
if dataset is None:
dataset_class = Dataset
elif isinstance(dataset, Dataset):
dataset_class = dataset.__class__
elif isinstance(dataset, type):
dataset_class = dataset
else:
raise TypeError("dataset should be some Dataset class or an instance of some Dataset class or None")
return dataset_class(self.index, batch_class=type(self), preloaded=self.data)
@property
def indices(self):
""": numpy array - an array with the indices """
if isinstance(self.index, DatasetIndex):
return self.index.indices
return self.index
def __len__(self):
return len(self.index)
@property
def size(self):
""": int - number of items in the batch """
return len(self.index)
@property
def data(self):
""": tuple or named components - batch data """
if self._data is None and self._preloaded is not None:
# load data the first time it's requested
with self._preloaded_lock:
if self._data is None and self._preloaded is not None:
self.load(src=self._preloaded)
res = self._data if self.components is None else self._data_named
return res if res is not None else self._empty_data
def make_item_class(self, local=False):
""" Create a class to handle data components """
# pylint: disable=protected-access
if self.components is None:
type(self)._item_class = None
elif type(self)._item_class is None or not local:
comp_class = MetaComponentsTuple(type(self).__name__ + 'Components', components=self.components)
type(self)._item_class = comp_class
else:
comp_class = MetaComponentsTuple(type(self).__name__ + 'Components' + str(id(self)),
components=self.components)
self._item_class = comp_class
@action
def add_components(self, components, init=None):
""" Add new components
Parameters
----------
components : str or list
new component names
init : array-like
initial component data
"""
if isinstance(components, str):
components = (components,)
init = (init,)
elif isinstance(components, list):
components = tuple(components)
data = self._data
if self.components is None:
self.components = components
data = tuple()
else:
self.components = self.components + components
data = data + tuple(init)
self.make_item_class(local=True)
self._data = data
return self
def __getstate__(self):
state = self.__dict__.copy()
state.pop('_data_named')
return state
def __setstate__(self, state):
for k, v in state.items():
# this warrants that all hidden objects are reconstructed upon unpickling
setattr(self, k, v)
@property
def _empty_data(self):
return None if self.components is None else self._item_class() # pylint: disable=not-callable
def get_pos(self, data, component, index):
""" Return a position in data for a given index
Parameters
----------
data : some array or tuple of arrays
if `None`, should return a position in :attr:`self.data <.Batch.data>`
        component : None, int or str
- None - data has no components (e.g. just an array or pandas.DataFrame)
- int - a position of a data component, when components names are not defined
(e.g. data is a tuple)
- str - a name of a data component
index : any
an index id
Returns
-------
int
a position in a batch data where an item with a given index is stored
Notes
-----
It is used to read / write data from / to a given component::
batch_data = data.component[pos]
data.component[pos] = new_data
if `self.data` holds a numpy array, then get_pos(None, None, index) should
just return `self.index.get_pos(index)`
if `self.data.images` contains BATCH_SIZE images as a numpy array,
then `get_pos(None, 'images', index)` should return `self.index.get_pos(index)`
if `self.data.labels` is a dict {index: label}, then `get_pos(None, 'labels', index)` should return index.
if `data` is not `None`, then you need to know in advance how to get a position for a given index.
For instance, `data` is a large numpy array, and a batch is a subset of this array and
`batch.index` holds row numbers from a large arrays.
Thus, `get_pos(data, None, index)` should just return index.
A more complicated example of data:
- batch represent small crops of large images
- `self.data.source` holds a few large images (e.g just 5 items)
- `self.data.coords` holds coordinates for crops (e.g. 100 items)
- `self.data.image_no` holds an array of image numbers for each crop (so it also contains 100 items)
then `get_pos(None, 'source', index)` should return `self.data.image_no[self.index.get_pos(index)]`.
Whilst, `get_pos(data, 'source', index)` should return `data.image_no[index]`.
"""
_ = component
if data is None:
pos = self.index.get_pos(index)
else:
pos = index
return pos
def __getattr__(self, name):
if self.components is not None and name in self.components: # pylint: disable=unsupported-membership-test
attr = getattr(self.data, name)
return attr
else:
raise AttributeError("%s not found in class %s" % (name, self.__class__.__name__))
def __setattr__(self, name, value):
if self.components is not None:
if name == "_data":
super().__setattr__(name, value)
if self._item_class is None:
self.make_item_class()
self._data_named = self._item_class(data=self._data) # pylint: disable=not-callable
elif name in self.components: # pylint: disable=unsupported-membership-test
if self._data_named is None:
_ = self.data
setattr(self._data_named, name, value)
super().__setattr__('_data', self._data_named.data)
else:
super().__setattr__(name, value)
else:
super().__setattr__(name, value)
def put_into_data(self, data, components=None):
""" Load data into :attr:`_data` property """
if self.components is None:
_src = data
else:
_src = data if isinstance(data, tuple) or data is None else tuple([data])
_src = self.get_items(self.indices, _src)
if components is None:
self._data = _src
else:
components = [components] if isinstance(components, str) else components
for i, comp in enumerate(components):
if isinstance(_src, dict):
comp_src = _src[comp]
else:
comp_src = _src[i]
setattr(self, comp, comp_src)
def get_items(self, index, data=None, components=None):
""" Return one or several data items from a data source """
if data is None:
_data = self.data
else:
_data = data
if components is None:
components = self.components
if self._item_class is not None and isinstance(_data, self._item_class):
pos = [self.get_pos(None, comp, index) for comp in components] # pylint: disable=not-an-iterable
res = self._item_class(data=_data, pos=pos) # pylint: disable=not-callable
elif isinstance(_data, tuple):
comps = components if components is not None else range(len(_data))
res = tuple(data_item[self.get_pos(data, comp, index)] if data_item is not None else None
for comp, data_item in zip(comps, _data))
elif isinstance(_data, dict):
res = dict(zip(components, (_data[comp][self.get_pos(data, comp, index)] for comp in components)))
else:
pos = self.get_pos(data, None, index)
res = _data[pos]
return res
def get(self, item=None, component=None):
""" Return an item from the batch or the component """
if item is None:
if component is None:
res = self.data
else:
res = getattr(self, component)
else:
if component is None:
res = self[item]
else:
res = self[item]
res = getattr(res, component)
return res
def __getitem__(self, item):
return self.get_items(item)
def __iter__(self):
for item in self.indices:
yield self[item]
@property
def items(self):
""": list - batch items """
return [[self[ix]] for ix in self.indices]
def run_once(self, *args, **kwargs):
""" Init function for no parallelism
Useful for async action-methods (will wait till the method finishes)
"""
_ = self.data, args, kwargs
return [[]]
def get_model_by_name(self, model_name):
""" Return a model specification given its name """
return self.pipeline.get_model_by_name(model_name, batch=self)
def get_errors(self, all_res):
""" Return a list of errors from a parallel action """
all_errors = [error for error in all_res if isinstance(error, Exception)]
return all_errors if len(all_errors) > 0 else None
@action
def do_nothing(self, *args, **kwargs):
""" An empty action (might be convenient in complicated pipelines) """
_ = args, kwargs
return self
@action
@inbatch_parallel(init='indices', post='_assemble')
def apply_transform(self, ix, func, *args, src=None, dst=None, p=1., use_self=False, **kwargs):
""" Apply a function to each item in the batch
Parameters
----------
func : callable
a function to apply to each item from the source
src : str, sequence, list of str
the source to get data from, can be:
- None
- str - a component name, e.g. 'images' or 'masks'
- sequence - a numpy-array, list, etc
- list of str - get data from several components
dst : str or array
the destination to put the result in, can be:
- None
- str - a component name, e.g. 'images' or 'masks'
- array-like - a numpy-array, list, etc
args, kwargs
parameters passed to ``func``
Notes
-----
apply_transform does the following (but in parallel)::
for item in range(len(batch)):
self.dst[item] = func(self.src[item], *args, **kwargs)
"""
if src is None:
_args = args
else:
if isinstance(src, str):
pos = self.get_pos(None, src, ix)
src_attr = (getattr(self, src)[pos],)
elif isinstance(src, list) and np.all([isinstance(component, str) for component in src]):
src_attr = [getattr(self, component)[self.get_pos(None, component, ix)] for component in src]
else:
pos = self.get_pos(None, dst, ix)
src_attr = (src[pos],)
_args = tuple([*src_attr, *args])
if np.random.binomial(1, p):
if use_self:
return func(self, *_args, **kwargs)
return func(*_args, **kwargs)
if len(src_attr) == 1:
return src_attr[0]
return src_attr
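    # Usage sketch (illustrative): apply a per-item transform to a component. `images` and
    # `my_rotate` are hypothetical; the inbatch_parallel decorator supplies `ix`, so callers
    # pass only the function and its arguments.
    #   batch.apply_transform(my_rotate, 10, src='images', dst='images', p=0.5)
    # With probability 0.5 each item becomes my_rotate(images[i], 10); otherwise the original
    # item is kept, and `_assemble` stacks the results back into the `images` component.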
@action
def apply_transform_all(self, func, *args, src=None, dst=None, p=1., use_self=False, **kwargs):
""" Apply a function the whole batch at once
Parameters
----------
func : callable
a function to apply to each item from the source
src : str or array
the source to get data from, can be:
- str - a component name, e.g. 'images' or 'masks'
- array-like - a numpy-array, list, etc
dst : str or array
the destination to put the result in, can be:
- None
- str - a component name, e.g. 'images' or 'masks'
- array-like - a numpy-array, list, etc
p : float
probability of applying transform to an element in the batch
args, kwargs
parameters passed to ``func``
Notes
-----
apply_transform_all does the following::
self.dst = func(self.src, *args, **kwargs)
"""
if not isinstance(dst, str) and not isinstance(src, str):
raise TypeError("At least of of dst and src should be attribute names, not arrays")
if src is None:
_args = args
else:
if isinstance(src, str):
src_attr = getattr(self, src)
else:
src_attr = src
_args = tuple([src_attr, *args])
indices = np.where(np.random.binomial(1, p, len(self)))[0]
if len(indices):
if use_self:
tr_res = func(self, indices=indices, *_args, **kwargs)
else:
tr_res = func(indices=indices, *_args, **kwargs)
else:
tr_res = src_attr
if dst is None:
pass
elif isinstance(dst, str):
setattr(self, dst, tr_res)
else:
dst[:] = tr_res
return self
def _get_file_name(self, ix, src, ext):
if src is None:
if isinstance(self.index, FilesIndex):
src = self.index.get_fullpath(ix)
if self.index.dirs:
file_name = os.path.join(src, 'data.' + ext)
else:
file_name = src + '.' + ext
else:
raise ValueError("File locations must be specified to dump/load data")
else:
file_name = os.path.join(os.path.abspath(src), str(ix) + '.' + ext)
return file_name
def _assemble_component(self, result, *args, component, **kwargs):
""" Assemble one component after parallel execution.
Parameters
----------
result : sequence, np.ndarray
Values to put into ``component``
component : str
Component to assemble.
"""
_ = args, kwargs
try:
new_items = np.stack(result)
except ValueError as e:
message = str(e)
if "must have the same shape" in message:
new_items = np.empty(len(result), dtype=object)
new_items[:] = result
else:
raise e
setattr(self, component, new_items)
def _assemble(self, all_results, *args, dst=None, **kwargs):
""" Assembles the batch after a parallel action.
Parameters
----------
all_results : sequence
Results after inbatch_parallel.
dst : str, sequence, np.ndarray
Components to assemble
Returns
-------
self
"""
_ = args
if any_action_failed(all_results):
all_errors = self.get_errors(all_results)
print(all_errors)
traceback.print_tb(all_errors[0].__traceback__)
raise RuntimeError("Could not assemble the batch")
if dst is None:
dst = kwargs.get('components', self.components)
if not isinstance(dst, (list, tuple, np.ndarray)):
dst = [dst]
if len(dst) == 1:
all_results = [all_results]
else:
all_results = list(zip(*all_results))
for component, result in zip(dst, all_results):
self._assemble_component(result, component=component, **kwargs)
return self
@inbatch_parallel('indices', post='_assemble', target='f')
def _load_blosc(self, ix, src=None, components=None):
""" Load data from a blosc packed file """
file_name = self._get_file_name(ix, src, 'blosc')
with open(file_name, 'rb') as f:
data = dill.loads(blosc.decompress(f.read()))
if self.components is None:
                components = (list(data.keys())[0],)
else:
components = tuple(components or self.components)
item = tuple(data[i] for i in components)
return item
@inbatch_parallel('indices', target='f')
def _dump_blosc(self, ix, dst, components=None):
""" Save blosc packed data to file """
file_name = self._get_file_name(ix, dst, 'blosc')
with open(file_name, 'w+b') as f:
if self.components is None:
components = (None,)
item = (self[ix],)
else:
components = tuple(components or self.components)
item = self[ix].as_tuple(components)
data = dict(zip(components, item))
f.write(blosc.compress(dill.dumps(data)))
def _load_table(self, src, fmt, components=None, post=None, *args, **kwargs):
""" Load a data frame from table formats: csv, hdf5, feather """
if fmt == 'csv':
if 'index_col' in kwargs:
index_col = kwargs.pop('index_col')
_data = pd.read_csv(src, *args, **kwargs).set_index(index_col)
else:
_data = pd.read_csv(src, *args, **kwargs)
elif fmt == 'feather':
_data = feather.read_dataframe(src, *args, **kwargs) # pylint: disable=redefined-variable-type
elif fmt == 'hdf5':
_data = pd.read_hdf(src, *args, **kwargs) # pylint: disable=redefined-variable-type
# Put into this batch only part of it (defined by index)
if isinstance(_data, pd.DataFrame):
_data = _data.loc[self.indices]
elif isinstance(_data, dd.DataFrame):
# dask.DataFrame.loc supports advanced indexing only with lists
_data = _data.loc[list(self.indices)].compute()
if callable(post):
_data = post(_data, src=src, fmt=fmt, components=components, **kwargs)
else:
components = tuple(components or self.components)
_new_data = dict()
for i, comp in enumerate(components):
_new_data[comp] = _data.iloc[:, i].values
_data = _new_data
for comp, values in _data.items():
setattr(self, comp, values)
@action(use_lock='__dump_table_lock')
def _dump_table(self, dst, fmt='feather', components=None, *args, **kwargs):
""" Save batch data to table formats
Args:
dst: str - a path to dump into
fmt: str - format: feather, hdf5, csv
components: str or tuple - one or several component names
"""
filename = dst
components = tuple(components or self.components)
data_dict = {}
for comp in components:
comp_data = self.get(component=comp)
if isinstance(comp_data, pd.DataFrame):
data_dict.update(comp_data.to_dict('series'))
elif isinstance(comp_data, np.ndarray):
if comp_data.ndim > 1:
columns = [comp + str(i) for i in range(comp_data.shape[1])]
comp_dict = zip(columns, (comp_data[:, i] for i in range(comp_data.shape[1])))
data_dict.update({comp: comp_dict})
else:
data_dict.update({comp: comp_data})
else:
data_dict.update({comp: comp_data})
_data = pd.DataFrame(data_dict)
if fmt == 'feather':
feather.write_dataframe(_data, filename, *args, **kwargs)
elif fmt == 'hdf5':
_data.to_hdf(filename, *args, **kwargs) # pylint:disable=no-member
elif fmt == 'csv':
_data.to_csv(filename, *args, **kwargs) # pylint:disable=no-member
else:
raise ValueError('Unknown format %s' % fmt)
return self
@action
def load(self, *args, src=None, fmt=None, components=None, **kwargs):
""" Load data from another array or a file.
Parameters
----------
src :
a source (e.g. an array or a file name)
fmt : str
a source format, one of None, 'blosc', 'csv', 'hdf5', 'feather'
components : None or str or tuple of str
components to load
**kwargs :
other parameters to pass to format-specific loaders
"""
_ = args
components = [components] if isinstance(components, str) else components
if fmt is None:
self.put_into_data(src, components)
elif fmt == 'blosc':
self._load_blosc(src=src, components=components, **kwargs)
elif fmt in ['csv', 'hdf5', 'feather']:
self._load_table(src=src, fmt=fmt, components=components, **kwargs)
else:
raise ValueError("Unknown format " + fmt)
return self
@action
def dump(self, *args, dst=None, fmt=None, components=None, **kwargs):
""" Save data to another array or a file.
Parameters
----------
dst :
a destination (e.g. an array or a file name)
fmt : str
a destination format, one of None, 'blosc', 'csv', 'hdf5', 'feather'
components : None or str or tuple of str
            components to dump
*args :
other parameters are passed to format-specific writers
*kwargs :
other parameters are passed to format-specific writers
"""
components = [components] if isinstance(components, str) else components
if fmt is None:
if components is not None and len(components) > 1:
raise ValueError("Only one component can be dumped into a memory array: components =", components)
components = components[0] if components is not None else None
dst[self.indices] = self.get(component=components)
elif fmt == 'blosc':
self._dump_blosc(dst, components=components)
elif fmt in ['csv', 'hdf5', 'feather']:
self._dump_table(dst, fmt, components, *args, **kwargs)
else:
raise ValueError("Unknown format " + fmt)
return self
@action
def save(self, *args, **kwargs):
""" Save batch data to a file (an alias for dump method)"""
return self.dump(*args, **kwargs)
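# Usage sketch (illustrative, paths and component names hypothetical): persist batch
# components with the table writers above and read them back into another batch.
#   batch.dump(dst='/tmp/batch.csv', fmt='csv', components=('x', 'y'))
#   batch.load(src='/tmp/batch.csv', fmt='csv', components=('x', 'y'), index_col=0)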
class ArrayBatch(Batch):
""" Base Batch class for array-like datasets
Batch data is a numpy array.
If components are defined, then each component data is a numpy array
"""
def _assemble_load(self, all_res, *args, **kwargs):
_ = args
if any_action_failed(all_res):
raise RuntimeError("Cannot assemble the batch", all_res)
if self.components is None:
            self._data = np.stack([res[0] for res in all_res])
import numpy as np
from petsc4py import PETSc
from src.geo import *
from src import stokes_flow as sf
from src.support_class import *
from src.StokesFlowMethod import *
__all__ = ['createEcoli_ellipse', 'createEcoliComp_ellipse', 'createEcoli_2tails',
'createEcoliComp_tunnel', 'createEcoli_tunnel', 'create_ecoli_dualTail',
'create_ecoli_2part', 'create_ecoli_tail', 'create_ecoli_tail_at',
'create_rotlets_tail_2part', 'create_selfRepeat_tail',
'create_ecoli_2part_at', 'create_ecoli_dualTail_at',
'get_tail_nodes_split_at', 'get_ecoli_nodes_split_at',
'get_ecoli_nodes_2part_at', 'get_tail_at', 'get_ellipsoid_at',
'create_diskVane_tail',
'create_capsule',
'create_rod',
'create_infHelix',
'create_helicoid_list', 'create_helicoid_comp',
'creat_dumb_obj',
'creat_helicoid_dumb', 'creat_helicoid_dumb_v2', 'creat_helicoid_dumb_selfRotate',
'obj2helicoid_list', 'obj2helicoid_list_v2', 'obj2helicoid_list_v3',
'obj2helicoid_comp', 'obj2helicoid_list_selfRotate',
'create_sphere', 'create_move_single_sphere',
'create_one_ellipse', 'create_one_ellipse_v2']
def create_capsule(rs1, rs2, ls, ds, node_dof=3):
lvs3 = ls - 2 * rs2
dth = ds / rs2
err_msg = 'geo parameter of create_capsule head is wrong. '
assert lvs3 >= 0, err_msg
vsgeo = base_geo()
vsgeo.set_dof(node_dof)
vsgeo1 = ellipse_base_geo() # velocity node geo of head
vsgeo1.create_half_delta(ds, rs1, rs2)
vsgeo2 = vsgeo1.copy()
vsgeo1.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
vsgeo1.node_rotation(norm=np.array((0, 0, 1)), theta=-np.pi / 2)
vsgeo1.move((0, 0, +lvs3 / 2))
vsgeo2.node_rotation(norm=np.array((0, 1, 0)), theta=+np.pi / 2)
vsgeo2.node_rotation(norm=np.array((0, 0, 1)), theta=+np.pi / 2 - dth)
vsgeo2.move((0, 0, -lvs3 / 2))
vsgeo2.set_nodes(np.flipud(vsgeo2.get_nodes()), deltalength=vsgeo2.get_deltaLength())
if lvs3 > ds:
vsgeo3 = tunnel_geo()
vsgeo3.create_deltatheta(dth=dth, radius=rs2, length=lvs3)
vsgeo.combine([vsgeo1, vsgeo3, vsgeo2])
else:
vsgeo.combine([vsgeo1, vsgeo2])
return vsgeo
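# Usage sketch (illustrative, parameter values hypothetical): a capsule is built from two
# half-ellipsoid caps (semi-axes rs1, rs2) joined by a tunnel of radius rs2 and length
# ls - 2 * rs2, discretised with node spacing ds.
#   vsgeo = create_capsule(rs1=1.0, rs2=0.5, ls=3.0, ds=0.1)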
def create_ecoli_tail(moveh, **kwargs):
nth = kwargs['nth']
hfct = kwargs['hfct']
eh = kwargs['eh']
ch = kwargs['ch']
rh11 = kwargs['rh11']
rh12 = kwargs['rh12']
rh2 = kwargs['rh2']
ph = kwargs['ph']
n_tail = kwargs['n_tail']
with_cover = kwargs['with_cover']
with_T_geo = kwargs['with_T_geo'] if 'with_T_geo' in kwargs.keys() else 0
left_hand = kwargs['left_hand']
rT2 = kwargs['rT2']
center = kwargs['center']
matrix_method = kwargs['matrix_method']
zoom_factor = kwargs['zoom_factor']
obj_type = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires eh==0. '
assert np.isclose(eh, 0), err_msg
# create helix
vhobj0 = obj_type()
node_dof = vhobj0.get_n_unknown()
B = ph / (2 * np.pi)
vhgeo0 = FatHelix() # velocity node geo of helix
if 'dualPotential' in matrix_method:
vhgeo0.set_check_epsilon(False)
vhgeo0.set_dof(node_dof)
dth = 2 * np.pi / nth
fhgeo0 = vhgeo0.create_deltatheta(dth=dth, radius=rh2, R1=rh11, R2=rh12, B=B, n_c=ch,
epsilon=eh, with_cover=with_cover, factor=hfct,
left_hand=left_hand)
vhobj0.set_data(fhgeo0, vhgeo0, name='helix_0')
vhobj0.zoom(zoom_factor)
if with_T_geo:
# dbg
OptDB = PETSc.Options()
factor = OptDB.getReal('dbg_theta_factor', 1.5)
PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
theta = np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2)
vhobj0.node_rotation(norm=np.array((0, 0, 1)), theta=theta)
vhobj0.move(moveh * zoom_factor)
tail_list = uniqueList()
for i0 in range(n_tail):
theta = 2 * np.pi / n_tail * i0
vhobj1 = vhobj0.copy()
vhobj1.node_rotation(norm=(0, 0, 1), theta=theta, rotation_origin=center.copy())
vhobj1.set_name('helix_%d' % i0)
tail_list.append(vhobj1)
return tail_list
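# Usage sketch (illustrative): the tail builders above are normally driven by a keyword
# dictionary assembled elsewhere in this package. All values below are placeholders, and
# matrix_method must be a valid key of sf.obj_dic (eh must be 0 for the regularized variants).
#   tail_kwargs = dict(nth=20, hfct=1, eh=0, ch=3, rh11=0.3, rh12=0.3, rh2=0.05, ph=0.667,
#                      n_tail=1, with_cover=2, with_T_geo=0, left_hand=False, rT2=0.05,
#                      center=np.zeros(3), zoom_factor=1, matrix_method=a_valid_method_name)
#   tail_list = create_ecoli_tail(np.zeros(3), **tail_kwargs)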
def create_ecoli_tail_bck(moveh, **kwargs):
nth = kwargs['nth']
hfct = kwargs['hfct']
eh = kwargs['eh']
ch = kwargs['ch']
rh11 = kwargs['rh11']
rh12 = kwargs['rh12']
rh2 = kwargs['rh2']
ph = kwargs['ph']
n_tail = kwargs['n_tail']
with_cover = kwargs['with_cover']
left_hand = kwargs['left_hand']
rT2 = kwargs['rT2']
center = kwargs['center']
matrix_method = kwargs['matrix_method']
zoom_factor = kwargs['zoom_factor']
obj_type = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires eh==0. '
assert np.isclose(eh, 0), err_msg
# create helix
vhobj0 = obj_type()
node_dof = vhobj0.get_n_unknown()
B = ph / (2 * np.pi)
vhgeo0 = FatHelix() # velocity node geo of helix
if 'dualPotential' in matrix_method:
vhgeo0.set_check_epsilon(False)
vhgeo0.set_dof(node_dof)
dth = 2 * np.pi / nth
fhgeo0 = vhgeo0.create_deltatheta(dth=dth, radius=rh2, R1=rh11, R2=rh12, B=B, n_c=ch,
epsilon=eh, with_cover=with_cover, factor=hfct,
left_hand=left_hand)
vhobj0.set_data(fhgeo0, vhgeo0, name='helix_0')
vhobj0.zoom(zoom_factor)
# dbg
OptDB = PETSc.Options()
factor = OptDB.getReal('dbg_theta_factor', 1.5)
PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
theta = np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2)
vhobj0.node_rotation(norm=np.array((0, 0, 1)), theta=theta)
vhobj0.move(moveh * zoom_factor)
tail_list = uniqueList()
for i0 in range(n_tail):
theta = 2 * np.pi / n_tail * i0
vhobj1 = vhobj0.copy()
vhobj1.node_rotation(norm=(0, 0, 1), theta=theta, rotation_origin=center.copy())
vhobj1.set_name('helix_%d' % i0)
tail_list.append(vhobj1)
return tail_list
def create_diskVane_tail(moveh, **kwargs):
r1 = kwargs['diskVane_r1']
r2 = kwargs['diskVane_r2']
rz = kwargs['diskVane_rz']
th_loc = kwargs['diskVane_th_loc']
# ph_loc = kwargs['diskVane_ph_loc']
ds = kwargs['diskVane_ds']
nr = kwargs['diskVane_nr']
nz = kwargs['diskVane_nz']
tgeo = regularizeDisk()
tgeo.create_ds(ds, r2)
tgeo.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
tgeo.node_rotation(norm=np.array([0, 0, 1]), theta=th_loc, rotation_origin=np.zeros(3))
tgeo.move(np.array((r1, 0, moveh)))
tgeo_list0 = []
trot = 2 * np.pi / nr
for i0 in range(nr):
th = trot * i0
tgeo2 = tgeo.copy()
tgeo2.node_rotation(norm=np.array((0, 0, 1)), theta=th, rotation_origin=np.zeros(3))
tgeo_list0.append(tgeo2)
if np.isclose(nz, 1):
tgeo_list = tgeo_list0
else:
tgeo_list = []
tz = rz / (nz - 1)
for i0 in range(nz):
tmove = tz * i0
th = np.pi * i0
for tgeoi in tgeo_list0:
tgeoj = tgeoi.copy()
tgeoj.move(np.array((0, 0, tmove)))
tgeoj.node_rotation(norm=np.array((0, 0, 1)), theta=th, rotation_origin=np.zeros(3))
tgeo_list.append(tgeoj)
return tgeo_list
def create_selfRepeat_tail(moveh, **kwargs):
nth = kwargs['nth']
hfct = kwargs['hfct']
eh = kwargs['eh']
ch = kwargs['ch']
rh11 = kwargs['rh11']
rh12 = kwargs['rh12']
rh2 = kwargs['rh2']
ph = kwargs['ph']
n_tail = kwargs['n_tail']
with_cover = kwargs['with_cover']
with_T_geo = kwargs['with_T_geo'] if 'with_T_geo' in kwargs.keys() else 0
left_hand = kwargs['left_hand']
rT2 = kwargs['rT2']
repeat_n = kwargs['repeat_n']
center = kwargs['center']
matrix_method = kwargs['matrix_method']
zoom_factor = kwargs['zoom_factor']
obj_type = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires eh==0. '
assert np.isclose(eh, 0), err_msg
# create helix
vhobj0 = obj_type() # type: sf.StokesFlowObj
node_dof = vhobj0.get_n_unknown()
B = ph / (2 * np.pi)
vhgeo0 = SelfRepeat_FatHelix(repeat_n) # velocity node geo of helix
if 'dualPotential' in matrix_method:
vhgeo0.set_check_epsilon(False)
vhgeo0.set_dof(node_dof)
dth = 2 * np.pi / nth
fhgeo0 = vhgeo0.create_deltatheta(dth=dth, radius=rh2, R1=rh11, R2=rh12, B=B, n_c=ch,
epsilon=eh, with_cover=with_cover, factor=hfct,
left_hand=left_hand) # type: SelfRepeat_FatHelix
vhobj0.set_data(fhgeo0, vhgeo0, name='helix_0')
vhobj0.zoom(zoom_factor)
if with_T_geo:
# dbg
OptDB = PETSc.Options()
factor = OptDB.getReal('dbg_theta_factor', 1.5)
PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
theta = np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2)
vhobj0.node_rotation(norm=np.array((0, 0, 1)), theta=theta)
vhobj0.move(moveh * zoom_factor)
tail_list = uniqueList()
for i0 in range(n_tail):
theta = 2 * np.pi / n_tail * i0
vhobj1 = vhobj0.copy()
vhobj1.node_rotation(norm=(0, 0, 1), theta=theta, rotation_origin=center.copy())
vhobj1.set_name('helix_%d' % i0)
tail_list.append(vhobj1)
tail_start_list = []
tail_body0_list = []
tail_end_list = []
for tobj in tail_list:
vhgeo0 = tobj.get_u_geo()
fhgeo0 = tobj.get_f_geo()
#
part_obj = obj_type()
part_ugeo = vhgeo0.get_start_geo()
part_fgeo = fhgeo0.get_start_geo()
part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_start')
tail_start_list.append(part_obj)
#
part_obj = sf.SelfRepeatObj()
part_ugeo = vhgeo0.get_body_mid_geo()
part_fgeo = fhgeo0.get_body_mid_geo()
part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_body0')
tail_body0_list.append(part_obj)
#
part_obj = obj_type()
part_ugeo = vhgeo0.get_end_geo()
part_fgeo = fhgeo0.get_end_geo()
part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_end')
tail_end_list.append(part_obj)
return tail_list, tail_start_list, tail_body0_list, tail_end_list
def create_ecoli_tail_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
tail_obj = sf.StokesFlowObj()
tail_obj.set_name('tail_obj')
tail_obj.combine(tail_list)
tail_obj.node_rotation(np.array((0, 1, 0)), theta)
tail_obj.node_rotation(np.array((0, 0, 1)), phi)
tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
tail_obj.move(now_center)
return tail_obj
def get_tail_nodes_split_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
tail_obj = sf.StokesFlowObj()
tail_obj.set_name('tail_obj')
tail_obj.combine(tail_list)
tail_obj.node_rotation(np.array((0, 1, 0)), theta)
tail_obj.node_rotation(np.array((0, 0, 1)), phi)
tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
tail_obj.move(now_center)
n_tail = problem_kwargs['n_tail']
t0 = np.split(tail_obj.get_u_nodes(), 2 * n_tail)
t1 = np.vstack(t0[1::2])
t2 = np.vstack(t0[0::2])
return t1, t2
def get_tail_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
tail_obj = sf.StokesFlowObj()
tail_obj.set_name('tail_obj')
tail_obj.combine(tail_list)
tail_obj.node_rotation(np.array((0, 1, 0)), theta)
tail_obj.node_rotation(np.array((0, 0, 1)), phi)
tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
tail_obj.move(now_center)
return [tail_obj.get_u_nodes(), ]
def createEcoli_ellipse(name='...', **kwargs):
ch = kwargs['ch']
ph = kwargs['ph']
ds = kwargs['ds']
rs1 = kwargs['rs1']
rs2 = kwargs['rs2']
es = kwargs['es']
# sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
zoom_factor = kwargs['zoom_factor'] if 'zoom_factor' in kwargs.keys() else 1
dist_hs = kwargs['dist_hs']
center = kwargs['center']
matrix_method = kwargs['matrix_method']
lh = ph * ch # length of helix
movesz = 0.5 * (dist_hs - 2 * rs1 + lh) + rs1
movehz = 0.5 * (dist_hs + 2 * rs1 - lh) + lh / 2
moves = np.array((0, 0, movesz)) + center # move distance of sphere
moveh = np.array((0, 0, -movehz)) + center # move distance of helix
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
# create tail
tail_list = create_ecoli_tail(moveh, **kwargs)
# create head
vsgeo = ellipse_base_geo() # velocity node geo of sphere
vsgeo.create_delta(ds, rs1, rs2)
vsgeo.set_geo_norm(vsgeo.get_geo_norm() * -1)
vsgeo.node_rotation(norm=np.array((0, 1, 0)), theta=np.pi / 2)
fsgeo = vsgeo.copy() # force node geo of sphere
fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
vsobj = objtype()
vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
vsobj.zoom(zoom_factor)
vsobj.move(moves * zoom_factor)
return vsobj, tail_list
def createEcoli_2tails(name='...', **kwargs):
ch = kwargs['ch']
ph = kwargs['ph']
ds = kwargs['ds']
rs1 = kwargs['rs1']
rs2 = kwargs['rs2']
es = kwargs['es']
# sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
zoom_factor = kwargs['zoom_factor'] if 'zoom_factor' in kwargs.keys() else 1
dist_hs = kwargs['dist_hs']
center = kwargs['center']
matrix_method = kwargs['matrix_method']
lh = ph * ch # length of helix
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
# create tail
movez = np.array((0, 0, rs1 + dist_hs + lh / 2))
tkwargs = kwargs.copy()
tkwargs['left_hand'] = False
tail_list1 = create_ecoli_tail(-movez, **tkwargs)
tkwargs['left_hand'] = True
tail_list2 = create_ecoli_tail(movez, **tkwargs)
# create head
vsgeo = ellipse_base_geo() # velocity node geo of sphere
vsgeo.create_delta(ds, rs1, rs2)
vsgeo.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
fsgeo = vsgeo.copy() # force node geo of sphere
fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
vsobj = objtype()
vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
vsobj.zoom(zoom_factor)
return vsobj, tail_list1, tail_list2
def createEcoliComp_ellipse(name='...', **kwargs):
vsobj, tail_list = createEcoli_ellipse(name=name, **kwargs)
vsgeo = vsobj.get_u_geo()
center = kwargs['center']
rel_Us = kwargs['rel_Us']
rel_Uh = kwargs['rel_Uh']
ecoli_comp = sf.ForceFreeComposite(center=center.copy(), norm=vsgeo.get_geo_norm().copy(),
name=name)
ecoli_comp.add_obj(vsobj, rel_U=rel_Us)
for ti in tail_list:
ecoli_comp.add_obj(ti, rel_U=rel_Uh)
rot_norm = kwargs['rot_norm']
rot_theta = kwargs['rot_theta'] * np.pi
ecoli_comp.node_rotation(norm=rot_norm.copy(), theta=rot_theta, rotation_origin=center.copy())
return ecoli_comp
def createEcoli_tunnel(**kwargs):
ch = kwargs['ch']
rh1 = kwargs['rh1']
rh2 = kwargs['rh2']
ph = kwargs['ph']
ds = kwargs['ds']
rs1 = kwargs['rs1']
rs2 = kwargs['rs2']
ls = kwargs['ls']
es = kwargs['es']
# sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
zoom_factor = kwargs['zoom_factor']
dist_hs = kwargs['dist_hs']
center = kwargs['center']
rT1 = kwargs['rT1']
rT2 = kwargs['rT2']
ntT = kwargs['ntT']
eT = kwargs['eT']
Tfct = kwargs['Tfct']
matrix_method = kwargs['matrix_method']
lh = ph * ch # length of helix
movesz = 0.5 * (dist_hs - ls + lh) + ls / 2
movehz = -1 * (0.5 * (dist_hs + ls - lh) + lh / 2)
# movesz = (ls + dist_hs) / 2
# movehz = (lh + dist_hs) / 2
moves = np.array((0, 0, movesz)) + center # move distance of sphere
moveh = np.array((rT1 - rh1, 0, movehz)) + center # move distance of helix
lT = (rT1 + rh2) * 2
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
err_msg = 'the regularized family methods requires eT==0. '
assert np.isclose(eT, 0), err_msg
# create helix
tail_list = create_ecoli_tail(moveh, **kwargs)
# create head
vsobj = objtype()
node_dof = vsobj.get_n_unknown()
vsgeo = create_capsule(rs1, rs2, ls, ds, node_dof)
fsgeo = vsgeo.copy() # force node geo of sphere
fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
fsgeo.node_zoom_z(1 - ds / (0.5 * (rs1 + rs2)) * es)
vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
vsobj.zoom(zoom_factor)
vsobj.move(moves * zoom_factor)
# create T shape
dtT = 2 * np.pi / ntT
vTobj = objtype()
node_dof = vTobj.get_n_unknown()
# # dbg
# OptDB = PETSc.Options( )
# factor = OptDB.getReal('dbg_move_factor', 1)
# PETSc.Sys.Print('--------------------> DBG: dbg_move_factor = %f' % factor)
# moveT = np.array((0, 0, moveh[-1] + lh / 2 + rh2 * factor))
moveT = np.array((0, 0, movehz + lh / 2)) + center
vTgeo = tunnel_geo()
if 'dualPotential' in matrix_method:
vTgeo.set_check_epsilon(False)
vTgeo.set_dof(node_dof)
fTgeo = vTgeo.create_deltatheta(dth=dtT, radius=rT2, factor=Tfct, length=lT, epsilon=eT,
with_cover=1)
vTobj.set_data(fTgeo, vTgeo, name='T_shape_0')
theta = -np.pi / 2
vTobj.node_rotation(norm=np.array((0, 1, 0)), theta=theta)
vTobj.zoom(zoom_factor)
vTobj.move(moveT * zoom_factor)
theta = np.pi / 4 - ch * np.pi
vsobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
for ti in tail_list:
ti.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
vTobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
return vsobj, tail_list, vTobj
def createEcoliComp_tunnel(name='...', **kwargs):
with_T_geo = kwargs['with_T_geo'] if 'with_T_geo' in kwargs.keys() else 0
center = kwargs['center']
rel_Us = kwargs['rel_Us']
rel_Uh = kwargs['rel_Uh']
if not with_T_geo:
kwargs['rT1'] = kwargs['rh1']
vsobj, tail_list, vTobj = createEcoli_tunnel(**kwargs)
ecoli_comp = sf.ForceFreeComposite(center, norm=vsobj.get_u_geo().get_geo_norm(), name=name)
ecoli_comp.add_obj(vsobj, rel_U=rel_Us)
for ti in tail_list:
ecoli_comp.add_obj(ti, rel_U=rel_Uh)
if with_T_geo:
ecoli_comp.add_obj(vTobj, rel_U=rel_Uh)
return ecoli_comp
def create_ecoli_2part(**problem_kwargs):
# create an ecoli composed of two parts, one is the head and the other is the tail.
rel_Us = problem_kwargs['rel_Us']
rel_Uh = problem_kwargs['rel_Uh']
center = problem_kwargs['center']
update_order = problem_kwargs['update_order'] if 'update_order' in problem_kwargs.keys() else 1
update_fun = problem_kwargs['update_fun'] if 'update_fun' in problem_kwargs.keys() \
else Adams_Bashforth_Methods
with_T_geo = problem_kwargs['with_T_geo']
err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
assert not with_T_geo, err_msg
head_obj, tail_obj_list = createEcoli_ellipse(name='ecoli0', **problem_kwargs)
head_obj.set_name('head_obj')
tail_obj = sf.StokesFlowObj()
tail_obj.set_name('tail_obj')
tail_obj.combine(tail_obj_list)
head_geo = head_obj.get_u_geo()
# ecoli_comp = sf.ForceFreeComposite(center=head_geo.get_center(), norm=head_geo.get_geo_norm(), name='ecoli_0')
ecoli_comp = sf.ForceFreeComposite(center=center, norm=head_geo.get_geo_norm(), name='ecoli_0')
ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
ecoli_comp.add_obj(obj=tail_obj, rel_U=rel_Uh)
ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return ecoli_comp
def create_rotlets_tail_2part(rotlet_strength=0, **problem_kwargs):
# create a swimmer with an infinitesimally small head (the limiting case is a rotlet) and tail(s).
ch = problem_kwargs['ch']
ph = problem_kwargs['ph']
dist_hs = problem_kwargs['dist_hs']
lh = ph * ch # length of helix
with_T_geo = problem_kwargs['with_T_geo']
err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
assert not with_T_geo, err_msg
tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
tail_obj0 = sf.StokesFlowObj()
tail_obj0.combine(tail_list)
tail_obj = sf.FundSoltObj()
tail_obj.set_data(tail_obj0.get_u_geo(), tail_obj0.get_f_geo(), name='rotlets_tail_obj')
location = np.array((0, 0, lh / 2 + dist_hs))
tnorm = tail_obj0.get_u_geo().get_geo_norm()
torque = tnorm * rotlet_strength
tail_obj.add_point_force(location=location, force=torque,
StokesletsHandle=light_rotlets_matrix_3d)
givenT = np.hstack((np.zeros(3), -1 * torque))
ecoli_comp = sf.GivenForceComposite(center=np.zeros(3), norm=tnorm,
name='rotlets_tail_comp', givenF=givenT)
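# the composite is prescribed an external torque equal and opposite to the rotlet torque
# applied above, so the swimmer (rotlet head + tail) as a whole should be net torque-free.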
ecoli_comp.add_obj(obj=tail_obj, rel_U=np.zeros(6))
update_order = problem_kwargs['update_order'] \
if 'update_order' in problem_kwargs.keys() \
else 1
update_fun = problem_kwargs['update_fun'] \
if 'update_fun' in problem_kwargs.keys() \
else Adams_Bashforth_Methods
ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return ecoli_comp
def create_ecoli_2part_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
ti = problem_kwargs['ti'] if 'ti' in problem_kwargs.keys() else 0
omega_tail = problem_kwargs['omega_tail'] if 'omega_tail' in problem_kwargs.keys() else 0
ecoli_comp = create_ecoli_2part(**problem_kwargs)
ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
head_obj = ecoli_comp.get_obj_list()[0]
tail_obj = ecoli_comp.get_obj_list()[1]
head_obj.node_rotation(head_obj.get_u_geo().get_geo_norm(), psi_tail - omega_tail * ti)
tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
ecoli_comp.move(now_center)
return ecoli_comp
def get_ecoli_nodes_2part_at(*args, **kwargs):
ecoli_comp = create_ecoli_2part_at(*args, **kwargs)
return [i0.get_u_geo().get_nodes() for i0 in ecoli_comp.get_obj_list()]
def get_ecoli_nodes_split_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
n_tail = problem_kwargs['n_tail']
ti = problem_kwargs['ti'] if 'ti' in problem_kwargs.keys() else 0
omega_tail = problem_kwargs['omega_tail'] if 'omega_tail' in problem_kwargs.keys() else 0
ecoli_comp = create_ecoli_2part(**problem_kwargs)
ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
head_obj = ecoli_comp.get_obj_list()[0]
tail_obj = ecoli_comp.get_obj_list()[1]
head_obj.node_rotation(head_obj.get_u_geo().get_geo_norm(), psi_tail - omega_tail * ti)
tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
ecoli_comp.move(now_center)
t0 = np.split(tail_obj.get_u_nodes(), 2 * n_tail)
t1 = np.vstack(t0[1::2])
t2 = np.vstack(t0[0::2])
t3 = ecoli_comp.get_obj_list()[0].get_u_nodes()
return t1, t2, t3
def get_ellipsoid_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
ds = problem_kwargs['ds']
rs1 = problem_kwargs['rs1']
rs2 = problem_kwargs['rs2']
vsgeo = ellipse_base_geo()
vsgeo.create_delta(ds, rs1, rs2)
vsgeo.set_geo_norm(vsgeo.get_geo_norm() * -1)
vsgeo.node_rotation(norm=np.array((0, 1, 0)), theta=np.pi / 2)
vsgeo.node_rotation(np.array((0, 1, 0)), theta)
vsgeo.node_rotation(np.array((0, 0, 1)), phi)
vsgeo.node_rotation(vsgeo.get_geo_norm(), psi_tail)
vsgeo.move(now_center - vsgeo.get_center())
return [vsgeo.get_nodes(), ]
def create_ecoli_dualTail(**problem_kwargs):
# create a swimmer with two tails at its ends, one left-handed and one right-handed.
# the swimmer contains three parts, i.e. head, upper tail and lower tail.
rel_Us = problem_kwargs['rel_Us']
rel_Uh = problem_kwargs['rel_Uh']
update_order = problem_kwargs['update_order'] if 'update_order' in problem_kwargs.keys() else 1
update_fun = problem_kwargs['update_fun'] if 'update_fun' in problem_kwargs.keys() \
else Adams_Bashforth_Methods
with_T_geo = problem_kwargs['with_T_geo']
err_msg = 'currently, do not support with_T_geo for this kind of ecoli. '
assert not with_T_geo, err_msg
head_obj, tail_obj_l1, tail_obj_l2 = createEcoli_2tails(name='ecoli0', **problem_kwargs)
head_obj.set_name('head_obj')
tail_obj1 = sf.StokesFlowObj()
tail_obj1.set_name('tail_obj1')
tail_obj1.combine(tail_obj_l1)
tail_obj2 = sf.StokesFlowObj()
tail_obj2.set_name('tail_obj2')
tail_obj2.combine(tail_obj_l2)
head_geo = head_obj.get_u_geo()
tnorm = head_geo.get_geo_norm()
ecoli_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=tnorm, name='ecoli_0')
ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
ecoli_comp.add_obj(obj=tail_obj1, rel_U=rel_Uh)
ecoli_comp.add_obj(obj=tail_obj2, rel_U=-rel_Uh)
ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return ecoli_comp
def create_ecoli_dualTail_at(theta, phi, psi_tail1, psi_tail2, center=np.zeros(3),
**problem_kwargs):
assert 1 == 2
ecoli_comp = create_ecoli_dualTail(**problem_kwargs)
# ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
# ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
# tail_obj1 = ecoli_comp.get_obj_list()[1]
# tail_obj1.node_rotation(tail_obj1.get_u_geo().get_geo_norm(), psi_tail1)
# tail_obj2 = ecoli_comp.get_obj_list()[2]
# tail_obj2.node_rotation(tail_obj2.get_u_geo().get_geo_norm(), psi_tail2)
return ecoli_comp
def create_sphere(namehandle='sphereObj', **kwargs):
matrix_method = kwargs['matrix_method']
rs = kwargs['rs']
sphere_velocity = kwargs['sphere_velocity']
ds = kwargs['ds']
es = kwargs['es']
sphere_coord = kwargs['sphere_coord']
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
obj_sphere = objtype()
sphere_geo0 = sphere_geo() # force geo
sphere_geo0.set_dof(obj_sphere.get_n_unknown())
sphere_geo0.create_delta(ds, rs)
sphere_geo0.set_rigid_velocity([0, 0, 0, 0, 0, 0])
sphere_geo1 = sphere_geo0.copy()
if 'pf' in matrix_method:
sphere_geo1.node_zoom((rs + ds * es) / rs)
obj_sphere.set_data(sphere_geo1, sphere_geo0)
obj_list = []
for i0, (t_coord, t_velocity) in enumerate(zip(sphere_coord, sphere_velocity)):
obj2 = obj_sphere.copy()
obj2.set_name('%s_%d' % (namehandle, i0))
obj2.move(t_coord)
obj2.get_u_geo().set_rigid_velocity(t_velocity)
obj_list.append(obj2)
return obj_list
def create_one_ellipse(namehandle='ellipseObj', **kwargs):
matrix_method = kwargs['matrix_method']
rs1 = kwargs['rs1']
rs2 = kwargs['rs2']
sphere_velocity = kwargs['sphere_velocity']
ds = kwargs['ds']
es = kwargs['es']
sphere_coord = kwargs['sphere_coord']
objtype = sf.obj_dic[matrix_method]
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
obj_sphere = objtype() # type: sf.StokesFlowObj
sphere_geo0 = ellipse_base_geo() # force geo
sphere_geo0.set_dof(obj_sphere.get_n_unknown())
sphere_geo0.create_delta(ds, rs1, rs2)
sphere_geo0.set_rigid_velocity(sphere_velocity)
sphere_geo1 = sphere_geo0.copy()
if 'pf' in matrix_method:
sphere_geo1.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
obj_sphere.set_data(sphere_geo1, sphere_geo0, name=namehandle)
obj_sphere.move(sphere_coord)
return obj_sphere
def create_one_ellipse_v2(namehandle='ellipseObj', **kwargs):
matrix_method = kwargs['matrix_method']
ellipse_rs1 = kwargs['ellipse_rs1']
ellipse_rs2 = kwargs['ellipse_rs2']
ellipse_rs3 = kwargs['ellipse_rs3']
ellipse_velocity = kwargs['ellipse_velocity']
ellipse_ds = kwargs['ellipse_ds']
ellipse_es = kwargs['ellipse_es']
ellipse_center = kwargs['ellipse_center']
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires ellipse_es==0. '
assert np.isclose(ellipse_es, 0), err_msg
obj_ellipse = sf.obj_dic[matrix_method]() # type: sf.StokesFlowObj
sphere_geo0 = ellipse_3d_geo() # force geo
sphere_geo0.set_dof(obj_ellipse.get_n_unknown())
sphere_geo0.create_delta(ellipse_ds, ellipse_rs1, ellipse_rs2, ellipse_rs3)
sphere_geo0.set_rigid_velocity(ellipse_velocity)
sphere_geo1 = sphere_geo0.copy()
if 'pf' in matrix_method:
t1 = np.mean((ellipse_rs1, ellipse_rs2, ellipse_rs3))
sphere_geo1.node_zoom(1 + ellipse_ds / t1 * ellipse_es)
obj_ellipse.set_data(sphere_geo1, sphere_geo0, name=namehandle)
obj_ellipse.move(ellipse_center)
return obj_ellipse
def create_move_single_sphere(namehandle='sphereObj', **kwargs):
movez = kwargs['movez']
obj_sphere = create_sphere(namehandle, **kwargs)[0]
displacement = np.array((0, 0, movez))
obj_sphere.move(displacement)
obj_list = (obj_sphere,)
return obj_list
def create_rod(namehandle='rod_obj', **problem_kwargs):
rRod = problem_kwargs['rRod']
lRod = problem_kwargs['lRod']
ntRod = problem_kwargs['ntRod']
eRod = problem_kwargs['eRod']
Rodfct = problem_kwargs['Rodfct']
RodThe = problem_kwargs['RodThe']
RodPhi = problem_kwargs['RodPhi']
rel_URod = problem_kwargs['rel_URod']
RodCenter = problem_kwargs['RodCenter']
zoom_factor = problem_kwargs['zoom_factor']
givenF = problem_kwargs['givenF']
matrix_method = problem_kwargs['matrix_method']
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires eRod==0. '
assert np.isclose(eRod, 0), err_msg
dth = 2 * np.pi / ntRod
rod_geo = tunnel_geo()
rod_geo.create_deltatheta(dth=dth, radius=rRod, length=lRod, epsilon=eRod,
with_cover=1, factor=Rodfct, left_hand=False)
# first displace the rod above the surface, then rotate it to horizontal.
rod_geo.move(displacement=RodCenter)
rod_geo.node_zoom(factor=zoom_factor, zoom_origin=RodCenter)
norm = np.array((0, 1, 0))
theta = -np.pi / 2
rod_geo.node_rotation(norm=norm, theta=theta, rotation_origin=RodCenter)
# then the rod, initially parallel to the XY plane (the wall), is rotated by angle theta
# about an axis in the YZ plane whose orientation is set by angle phi.
norm = np.array((0, np.sin(RodPhi), np.cos(RodPhi)))
rod_geo.node_rotation(norm=norm, theta=-RodThe, rotation_origin=RodCenter)
rod_obj = sf.obj_dic[matrix_method]()
name = namehandle + '_obj_0'
rod_obj.set_data(f_geo=rod_geo, u_geo=rod_geo, name=name)
name = namehandle + '_0'
rod_comp = sf.GivenForceComposite(center=RodCenter, name=name, givenF=givenF.copy())
rod_comp.add_obj(obj=rod_obj, rel_U=rel_URod)
rod_list = (rod_comp,)
return rod_list
def create_infHelix(namehandle='infhelix', normalize=False, **problem_kwargs):
n_tail = problem_kwargs['n_tail']
eh = problem_kwargs['eh']
ch = problem_kwargs['ch']
rh1 = problem_kwargs['rh1']
rh2 = problem_kwargs['rh2']
ph = problem_kwargs['ph']
nth = problem_kwargs['nth']
zoom_factor = problem_kwargs['zoom_factor']
matrix_method = problem_kwargs['matrix_method']
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires eh==0. '
assert np.isclose(eh, 0), err_msg
if normalize:
rh2 = rh2 * zoom_factor
ph = ph * zoom_factor
rh1 = rh1 * zoom_factor
helix_list = []
for i0, theta0 in enumerate(np.linspace(0, 2 * np.pi, n_tail, endpoint=False)):
infhelix_ugeo = infHelix()
infhelix_ugeo.create_n(rh1, rh2, ph, ch, nth, theta0=theta0)
infhelix_fgeo = infhelix_ugeo.create_fgeo(epsilon=eh)
infhelix_obj = sf.StokesFlowObj()
infhelix_obj.set_data(f_geo=infhelix_fgeo, u_geo=infhelix_ugeo,
name=namehandle + '%02d' % i0)
helix_list.append(infhelix_obj)
return helix_list
def create_helicoid_list(namehandle='helicoid', **problem_kwargs):
r1 = problem_kwargs['helicoid_r1']
r2 = problem_kwargs['helicoid_r2']
ds = problem_kwargs['helicoid_ds']
th_loc = problem_kwargs['helicoid_th_loc']
ndsk_each = problem_kwargs['helicoid_ndsk_each']
matrix_method = problem_kwargs['matrix_method']
assert matrix_method in ('rs', 'lg_rs')
assert ndsk_each == 4
tgeo = regularizeDisk()
tgeo.create_ds(ds, r2)
tgeo.node_rotation(norm=np.array([1, 0, 0]), theta=th_loc)
tgeo.move(np.array((r1, 0, 0)))
# tgeo.show_nodes()
tgeo_list = []
rot_dth = 2 * np.pi / ndsk_each
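# at each of the ndsk_each (= 4) azimuthal positions, three copies of the disk are placed
# on (approximately) mutually perpendicular planes, giving 3 * ndsk_each disks in total
# for the helicoid model.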
for i0 in range(ndsk_each):
rot_th = i0 * rot_dth + np.pi / 4
# rot_th = i0 * rot_dth
tgeo21 = tgeo.copy()
tgeo21.node_rotation(norm=np.array([0, 0, 1]), theta=rot_th, rotation_origin=np.zeros(3))
tgeo22 = tgeo21.copy()
tgeo_list.append(tgeo21)
tgeo22.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
tgeo23 = tgeo21.copy()
tgeo_list.append(tgeo22)
tgeo23.node_rotation(norm=np.array([0, 1, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
tgeo_list.append(tgeo23)
# tgeo3 = base_geo()
# tgeo3.combine(tgeo_list)
# tgeo3.show_nodes(linestyle='')
tobj_list = []
for i0, tgeo in enumerate(tgeo_list):
tobj = sf.StokesFlowObj()
tobj.set_matrix_method(matrix_method) # the geo is regularizeDisk
tobj.set_data(f_geo=tgeo, u_geo=tgeo, name=namehandle + '%02d' % i0)
tobj_list.append(tobj)
return tobj_list
def create_helicoid_comp(*args, **kwargs):
update_order = kwargs['update_order'] if 'update_order' in kwargs.keys() else 1
update_fun = kwargs['update_fun'] if 'update_fun' in kwargs.keys() else Adams_Bashforth_Methods
helicoid_list = create_helicoid_list(*args, **kwargs)
helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
name='helicoid_comp')
for tobj in helicoid_list:
# print(tobj)
helicoid_comp.add_obj(obj=tobj, rel_U=np.zeros(6))
helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return helicoid_comp
def obj2helicoid_list(tobj0, **problem_kwargs):
# assert 1 == 2
helicoid_r = problem_kwargs['helicoid_r']
ndsk_each = problem_kwargs['helicoid_ndsk_each']
assert ndsk_each == 4
tobj = tobj0.copy()
tobj.move(np.array((helicoid_r, 0, 0)))
tobj_list = []
rot_dth = 2 * np.pi / ndsk_each
namehandle = tobj.get_name()
for i0 in range(ndsk_each):
rot_th = i0 * rot_dth + np.pi / 4
# rot_th = i0 * rot_dth
tobj21 = tobj.copy()
tobj21.set_name('%s_%02d_%01d' % (namehandle, i0, 1))
tobj21.node_rotation(norm=np.array([0, 0, 1]), theta=rot_th, rotation_origin=np.zeros(3))
tobj_list.append(tobj21)
tobj22 = tobj21.copy()
tobj22.set_name('%s_%02d_%01d' % (namehandle, i0, 2))
tobj22.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
tobj_list.append(tobj22)
tobj23 = tobj21.copy()
tobj23.set_name('%s_%02d_%01d' % (namehandle, i0, 3))
tobj23.node_rotation(norm=np.array([0, 1, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
tobj_list.append(tobj23)
return tobj_list
def obj2helicoid_list_v2(tobj0, **problem_kwargs):
helicoid_r = problem_kwargs['helicoid_r']
ndsk_each = problem_kwargs['helicoid_ndsk_each']
assert ndsk_each == 4
helicoid_th0 = problem_kwargs['helicoid_th0'] if 'helicoid_th0' in problem_kwargs.keys() else 0
assert np.isclose(np.linalg.norm(tobj0.get_u_geo().get_center()), 0)
namehandle = tobj0.get_name()
t1 = helicoid_r / np.sqrt(2)
tobj0.move((t1, t1, 0))
tobj1 = tobj0.copy()
tobj1.node_rotation(np.array((1, 0, 0)), np.pi / 2, rotation_origin=np.zeros(3))
tobj2 = tobj0.copy()
tobj2.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
tobj_list = []
rot_dth = 2 * np.pi / ndsk_each
for i0 in range(ndsk_each):
rot_th = i0 * rot_dth + helicoid_th0
for i1, tobji in enumerate((tobj0, tobj1, tobj2)):
tobji_i0 = tobji.copy()
tobji_i0.set_name('%s_%02d_%01d' % (namehandle, i0, i1))
tobji_i0.node_rotation(np.array((0, 0, 1)), rot_th, rotation_origin=np.zeros(3))
tobj_list.append(tobji_i0)
return tobj_list
def obj2helicoid_list_v3(tobj, **problem_kwargs):
helicoid_r = problem_kwargs['helicoid_r']
ndsk_each = problem_kwargs['helicoid_ndsk_each']
assert ndsk_each == 4
helicoid_th0 = problem_kwargs['helicoid_th0'] if 'helicoid_th0' in problem_kwargs.keys() else 0
# assert np.isclose(np.linalg.norm(tobj.get_u_geo().get_center()), 0)
namehandle = tobj.get_name()
rot_dth = 2 * np.pi / ndsk_each
tobj.move((helicoid_r, 0, 0))
tobj0 = tobj.copy()
tobj0.node_rotation(np.array((0, 0, 1)), -rot_dth / 2, rotation_origin=np.zeros(3))
tobj1 = tobj.copy()
tobj1.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
tobj1.node_rotation(np.array((0, 1, 0)), rot_dth / 2, rotation_origin=np.zeros(3))
tobj2 = tobj.copy()
tobj2.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
tobj2.node_rotation(np.array((0, 1, 0)), -rot_dth / 2, rotation_origin=np.zeros(3))
# # dbg
# dbg_obj = sf.StokesFlowObj()
# dbg_obj.combine((tobj0, tobj1, tobj2))
# dbg_obj.show_u_nodes()
# assert 1 == 2
#
tobj_list = []
for i0 in range(ndsk_each):
rot_th = i0 * rot_dth + helicoid_th0
for i1, tobji in enumerate((tobj0, tobj1, tobj2)):
tobji_i0 = tobji.copy()
tobji_i0.set_name('%s_%02d_%01d' % (namehandle, i0, i1))
tobji_i0.node_rotation(np.array((0, 0, 1)), rot_th, rotation_origin=np.zeros(3))
tobj_list.append(tobji_i0)
return tobj_list
def obj2helicoid_list_selfRotate(tobj, **problem_kwargs):
helicoid_r = problem_kwargs['helicoid_r']
ndsk_each = problem_kwargs['helicoid_ndsk_each']
assert ndsk_each == 4
# helicoid_th0 = problem_kwargs['helicoid_th0'] if 'helicoid_th0' in problem_kwargs.keys() else 0
assert np.isclose(np.linalg.norm(tobj.get_u_geo().get_center()), 0)
# namehandle = tobj.get_name()
rot_dth = 2 * np.pi / ndsk_each
tobj.move((helicoid_r, 0, 0))
tobj0 = tobj.copy()
tobj0.node_rotation(np.array((0, 0, 1)), -rot_dth / 2, rotation_origin=np.zeros(3))
tobj1 = tobj.copy()
tobj1.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
tobj1.node_rotation(np.array((0, 1, 0)), rot_dth / 2, rotation_origin=np.zeros(3))
tobj2 = tobj.copy()
tobj2.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
tobj2.node_rotation(np.array((0, 1, 0)), -rot_dth / 2, rotation_origin=np.zeros(3))
tobj_list = [tobj0, tobj1, tobj2]
return tobj_list
def obj2helicoid_comp(tobj0, **kwargs):
update_order = kwargs['update_order'] if 'update_order' in kwargs.keys() else 1
update_fun = kwargs['update_fun'] if 'update_fun' in kwargs.keys() else Adams_Bashforth_Methods
# helicoid_list = obj2helicoid_list(tobj0, *args, **kwargs)
helicoid_list = obj2helicoid_list_v3(tobj0, **kwargs)
helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
name='helicoid_comp')
for tobj in helicoid_list:
helicoid_comp.add_obj(obj=tobj, rel_U=np.zeros(6))
helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return helicoid_comp
def obj2helicoid_comp_selfRotate(tobj0, **kwargs):
update_order = kwargs['update_order'] if 'update_order' in kwargs.keys() else 1
update_fun = kwargs['update_fun'] if 'update_fun' in kwargs.keys() else Adams_Bashforth_Methods
# helicoid_list = obj2helicoid_list(tobj0, *args, **kwargs)
helicoid_list = obj2helicoid_list_selfRotate(tobj0, **kwargs)
helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
name='helicoid_comp')
for tobj in helicoid_list:
helicoid_comp.add_obj(obj=tobj, rel_U=np.zeros(6))
helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
update_fun=update_fun, update_order=update_order)
return helicoid_comp
def creat_dumb_obj(name='helicoid_dumb', **problem_kwargs):
matrix_method = problem_kwargs['matrix_method']
dumb_d = problem_kwargs['dumb_d']
dumb_theta = problem_kwargs['dumb_theta']
ds = problem_kwargs['ds']
rs = problem_kwargs['rs']
es = problem_kwargs['es']
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
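# build a dumbbell: two spheres of radius rs separated by dumb_d along the z axis, then
# tilted by dumb_theta about the x axis; the force geometry is a copy inflated by a factor
# (1 + ds*es/rs), which reduces to the velocity geometry when es == 0.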
sphere_geo0 = sphere_geo()
sphere_geo0.create_delta(ds, rs)
sphere_geo0f = sphere_geo0.copy()
sphere_geo0f.node_zoom(1 + ds * es / rs)
sphere_geo1 = sphere_geo0.copy()
sphere_geo1f = sphere_geo0f.copy()
sphere_geo0.move(np.array((0, 0, dumb_d / 2)))
sphere_geo1.move(np.array((0, 0, -dumb_d / 2)))
sphere_geo0f.move(np.array((0, 0, dumb_d / 2)))
sphere_geo1f.move(np.array((0, 0, -dumb_d / 2)))
dumb_geo = base_geo()
dumb_geo.combine([sphere_geo0, sphere_geo1], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
dumb_geo.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
dumb_geof = base_geo()
dumb_geof.combine([sphere_geo0f, sphere_geo1f], origin=np.zeros(3),
geo_norm=np.array((0, 0, 1)))
dumb_geof.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
tobj = sf.obj_dic[matrix_method]()
tobj.set_data(dumb_geof, dumb_geo, name=name)
return tobj
def creat_dumb_obj_v2(name='helicoid_dumb', **problem_kwargs):
matrix_method = problem_kwargs['matrix_method']
dumb_d = problem_kwargs['dumb_d']
dumb_theta = problem_kwargs['dumb_theta']
ds = problem_kwargs['ds']
rs = problem_kwargs['rs']
es = problem_kwargs['es']
if 'rs' in matrix_method:
err_msg = 'the regularized family methods requires es==0. '
assert np.isclose(es, 0), err_msg
sphere_geo0 = sphere_geo()
sphere_geo0.create_delta(ds, rs)
sphere_geo0f = sphere_geo0.copy()
sphere_geo0f.node_zoom(1 + ds * es / rs)
sphere_geo1 = sphere_geo0.copy()
sphere_geo1f = sphere_geo0f.copy()
# sphere_geo0.move(np.array((0, 0, dumb_d / 2)))
# sphere_geo1.move(np.array((0, 0, -dumb_d / 2)))
# sphere_geo0f.move(np.array((0, 0, dumb_d / 2)))
# sphere_geo1f.move(np.array((0, 0, -dumb_d / 2)))
# dumb_geo = base_geo()
# dumb_geo.combine([sphere_geo0, sphere_geo1], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
# dumb_geo.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
# dumb_geof = base_geo()
# dumb_geof.combine([sphere_geo0f, sphere_geo1f], origin=np.zeros(3),
# geo_norm=np.array((0, 0, 1)))
# dumb_geof.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
tobj0 = sf.obj_dic[matrix_method]()
tobj0.set_data(sphere_geo0f, sphere_geo0, name='%s_0' % name)
tobj0.move(np.array((0, 0, dumb_d / 2)))
tobj0.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta, rotation_origin=np.zeros(3))
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hypothesis import settings, given, strategies as st
from hypothesis.extra.numpy import arrays
import numpy as np
import tensorflow as tf
from thewalrus.symplectic import two_mode_squeezing
from mrmustard.lab.gates import Sgate, BSgate, S2gate, Ggate, Interferometer
from mrmustard.lab.circuit import Circuit
from mrmustard.utils.training import Optimizer
from mrmustard.utils.parametrized import Parametrized
from mrmustard.lab.states import Vacuum
from mrmustard.physics.gaussian import trace, von_neumann_entropy
from mrmustard import settings
from mrmustard.math import Math
math = Math()
@given(n=st.integers(0, 3))
def test_S2gate_coincidence_prob(n):
"""Testing the optimal probability of obtaining |n,n> from a two mode squeezed vacuum"""
tf.random.set_seed(137)
S = S2gate(
r=abs(np.random.normal()),
phi=np.random.normal(),
r_trainable=True,
phi_trainable=True,
)
def cost_fn():
return -tf.abs((Vacuum(2) >> S[0, 1]).ket(cutoffs=[n + 1, n + 1])[n, n]) ** 2
opt = Optimizer(euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[S], max_steps=300)
expected = 1 / (n + 1) * (n / (n + 1)) ** n
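# for a two-mode squeezed vacuum, P(n,n) = tanh(r)**(2*n) / cosh(r)**2 = t**n * (1 - t)
# with t = tanh(r)**2; this is maximal at t = n / (n + 1), giving the value above,
# 1/(n+1) * (n/(n+1))**n.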
assert np.allclose(-cost_fn(), expected, atol=1e-5)
@given(i=st.integers(1, 5), k=st.integers(1, 5))
def test_hong_ou_mandel_optimizer(i, k):
"""Finding the optimal beamsplitter transmission to get Hong-Ou-Mandel dip
This generalizes the single photon Hong-Ou-Mandel effect to the many photon setting
see Eq. 20 of https://journals.aps.org/prresearch/pdf/10.1103/PhysRevResearch.3.043065
which lacks a square root in the right hand side.
"""
tf.random.set_seed(137)
r = np.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
BSgate(
theta=np.arccos(np.sqrt(k / (i + k))) + 0.1 * np.random.normal(),
phi=np.random.normal(),
theta_trainable=True,
phi_trainable=True,
)[1, 2],
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
cutoff = 1 + i + k
def cost_fn():
return tf.abs((state_in >> circ).ket(cutoffs=[cutoff] * 4)[i, 1, i + k - 1, k]) ** 2
opt = Optimizer(euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300)
assert np.allclose(
np.cos(circ.trainable_parameters["euclidean"][2]) ** 2, k / (i + k), atol=1e-2
)
def test_squeezing_hong_ou_mandel_optimizer():
"""Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time
see https://www.pnas.org/content/117/52/33107/tab-article-info
"""
tf.random.set_seed(137)
r = np.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
S2gate(r=1.0, phi=np.random.normal(), r_trainable=True, phi_trainable=True)[1, 2],
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
def cost_fn():
return tf.abs((state_in >> circ).ket(cutoffs=[2, 2, 2, 2])[1, 1, 1, 1]) ** 2
opt = Optimizer(euclidean_lr=0.001)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300)
assert np.allclose(np.sinh(circ.trainable_parameters["euclidean"][2]) ** 2, 1, atol=1e-2)
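# i.e. the optimal intermediate squeezer matches the input two-mode squeezers, whose
# parameter r = arcsinh(1) also satisfies sinh(r)**2 == 1.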
def test_learning_two_mode_squeezing():
"""Finding the optimal beamsplitter transmission to make a pair of single photons"""
tf.random.set_seed(137)
ops = [
Sgate(
r=abs(np.random.normal(size=(2))),
phi=np.random.normal(size=(2)),
r_trainable=True,
phi_trainable=True,
),
BSgate(
theta=np.random.normal(),
phi=np.random.normal(),
theta_trainable=True,
phi_trainable=True,
),
]
circ = Circuit(ops)
tf.random.set_seed(20)
state_in = Vacuum(num_modes=2)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[2, 2])
return -tf.abs(amps[1, 1]) ** 2 + tf.abs(amps[0, 1]) ** 2
opt = Optimizer(euclidean_lr=0.05)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000)
assert np.allclose(-cost_fn(), 0.25, atol=1e-5)
def test_learning_two_mode_Ggate():
"""Finding the optimal Ggate to make a pair of single photons"""
tf.random.set_seed(137)
G = Ggate(num_modes=2, symplectic_trainable=True)
tf.random.set_seed(20)
def cost_fn():
amps = (Vacuum(2) >> G).ket(cutoffs=[2, 2])
return -tf.abs(amps[1, 1]) ** 2 + tf.abs(amps[0, 1]) ** 2
opt = Optimizer(symplectic_lr=0.5, euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[G], max_steps=500)
assert np.allclose(-cost_fn(), 0.25, atol=1e-4)
def test_learning_two_mode_Interferometer():
"""Finding the optimal Interferometer to make a pair of single photons"""
np.random.seed(11)
ops = [
Sgate(
r=np.random.normal(size=(2)) ** 2,
phi=np.random.normal(size=(2)),
r_trainable=True,
phi_trainable=True,
),
Interferometer(num_modes=2, orthogonal_trainable=True),
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=2)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[2, 2])
return -tf.abs(amps[1, 1]) ** 2 + tf.abs(amps[0, 1]) ** 2
opt = Optimizer(orthogonal_lr=0.5, euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000)
assert np.allclose(-cost_fn(), 0.25, atol=1e-5)
def test_learning_four_mode_Interferometer():
"""Finding the optimal Interferometer to make a NOON state with N=2"""
np.random.seed(11)
ops = [
Sgate(
r=np.random.uniform(size=4),
phi=np.random.normal(size=4),
r_trainable=True,
phi_trainable=True,
),
Interferometer(num_modes=4, orthogonal_trainable=True),
]
circ = Circuit(ops)
state_in = Vacuum(num_modes=4)
def cost_fn():
amps = (state_in >> circ).ket(cutoffs=[3, 3, 3, 3])
return (
-tf.abs(
tf.reduce_sum(
amps[1, 1]
* np.array([[0, 0, 1 / np.sqrt(2)], [0, 0, 0], [1 / np.sqrt(2), 0, 0]])
)
)
** 2
)
opt = Optimizer(symplectic_lr=0.5, euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000)
assert np.allclose(-cost_fn(), 0.0625, atol=1e-5)
def test_squeezing_hong_ou_mandel_optimizer():
"""Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time
see https://www.pnas.org/content/117/52/33107/tab-article-info
"""
tf.random.set_seed(137)
r = np.arcsinh(1.0)
ops = [
S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1],
S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3],
S2gate(r=1.0, phi=np.random.normal(), r_trainable=True, phi_trainable=True)[1, 2],
]
circ = Circuit(ops)
def cost_fn():
return tf.abs((Vacuum(4) >> circ).ket(cutoffs=[2, 2, 2, 2])[1, 1, 1, 1]) ** 2
opt = Optimizer(euclidean_lr=0.001)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300)
assert np.allclose(np.sinh(circ.trainable_parameters["euclidean"][2]) ** 2, 1, atol=1e-2)
# dd_models.py - Distance distribtion parametric models
# ---------------------------------------------------------------------------
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2021: <NAME>, <NAME> and other contributors.
import math as m
import numpy as np
import scipy.special as spc
import inspect
from deerlab.utils import metadata
# =================================================================
def docstr_header(title,fcnstr):
"Definition of the header for all distribution models"
return f"""
{title}
The function takes a list or array of parameters and returns the calculated distance distribution::
P = {fcnstr}(r,param)
The built-in information on the model can be accessed via its attributes::
{fcnstr}.parameters # String list of parameter names
{fcnstr}.units # String list of metric units of parameters
{fcnstr}.start # List of values used as start values during optimization
{fcnstr}.lower # List of values used as lower bounds during optimization
{fcnstr}.upper # List of values used as upper bounds during optimization
Parameters
----------
r : array_like
Distance axis, in nanometers.
param : array_like
List of model parameter values.
Returns
-------
P : ndarray
Distance distribution.
"""
# =================================================================
# =================================================================
def docstr_example(fcnstr):
return f"""
Examples
--------
Example of the model evaluated at the start values of the parameters:
.. plot::
import deerlab as dl
import matplotlib.pyplot as plt
import numpy as np
model = dl.{fcnstr}
r = np.linspace(2,5,400)
info = model()
par0 = info['Start']
P = model(r,par0)
plt.figure(figsize=[6,3])
plt.plot(r,P)
plt.xlabel('r (nm)',fontsize=13)
plt.ylabel('P (nm⁻¹)',fontsize=13)
plt.grid(alpha=0.4)
plt.tick_params(labelsize=12)
plt.tick_params(labelsize=12)
plt.tight_layout()
"""
# =================================================================
# =================================================================
def docstring():
"""
Decorator: Insert docstring header to a pre-existing docstring
"""
sep="\n"
def _decorator(func):
docstr = func.__doc__
title = docstr.split("Notes",1)[0]
docstr = docstr.replace(title,"")
func.__doc__ = sep.join([docstr_header(title,func.__name__),docstr])
func.__doc__ = sep.join([func.__doc__,docstr_example(func.__name__)])
return func
return _decorator
# =================================================================
# =================================================================
def _parsparam(r,p,npar):
r,p = np.atleast_1d(r,p)
if len(p)!=npar:
raise ValueError(f'The model function requires {npar} parameters, but {len(p)} are provided.')
return r,p
# =================================================================
# =================================================================
def _normalize(r,P):
if not all(P==0):
P = P/np.trapz(P,r)
return P
# =================================================================
# =================================================================
def _multigaussfun(r,r0,sig,a):
"Compute a distribution with multiple Gaussians"
n = len(r0)
P = np.zeros_like(r)
for k in range(n):
P += a[k]*m.sqrt(1/(2*m.pi))*1/sig[k]*np.exp(-0.5*((r-r0[k])/sig[k])**2)
P = _normalize(r,P)
return P
# =================================================================
# =================================================================
def _multirice3dfun(r,nu,sig,a):
"Compute a distribution with multiple Gaussians"
N = len(nu)
nu = np.maximum(nu,0) # to avoid invalid values
n = 3 # degrees of freedom
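# each component k is a 3D Rice (non-central chi, 3 d.o.f.) distribution:
#   P_k(r) = nu**(n/2-1)/sigma**2 * r**(n/2) * exp(-(r**2+nu**2)/(2*sigma**2)) * I_(n/2-1)(nu*r/sigma**2)
# spc.ive returns exp(-|x|)*I_v(x), hence the extra +nu*r/s2 term inside the exponential below.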
P = np.zeros_like(r)
for k in range(N):
s2 = sig[k]**2
I_scaled = spc.ive(n/2-1, nu[k]*r/s2)
P += a[k]*nu[k]**(n/2-1)/s2*r**(n/2)*np.exp(-(r**2+nu[k]**2)/(2*s2)+nu[k]*r/s2)*I_scaled
P[P<0] = 0
P = _normalize(r,P)
return P
# =================================================================
# =================================================================
@metadata(
parameters = ('Mean','Standard deviation'),
units = ('nm','nm'),
start = np.asarray([3.5, 0.2]),
lower = np.asarray([1, 0.05]),
upper = np.asarray([20, 2.5]))
@docstring()
def dd_gauss(r,param):
r"""
Gaussian distribution
Notes
-----
**Model:**
:math:`P(r) = \sqrt{\frac{2}{\pi}}\frac{1}{\sigma}\exp\left(-\frac{(r-\left<r\right>)^2}{\sigma^2}\right)`
============== ======================== ============= ============= ============= ===========================
Variable Symbol Start value Lower bound Upper bound Description
============== ======================== ============= ============= ============= ===========================
``param[0]`` :math:`\left<r\right>` 3.5 1.0 20 Mean (nm)
``param[1]`` :math:`\sigma` 0.2 0.05 2.5 Standard deviation (nm)
============== ======================== ============= ============= ============= ===========================
"""
r,param = _parsparam(r,param,npar=2)
r0 = [param[0]]
sigma = [param[1]]
a = [1.0]
P = _multigaussfun(r,r0,sigma,a)
return P
# =================================================================
# =================================================================
@metadata(
parameters = ('Mean of 1st Gaussian', 'Standard deviation of 1st Gaussian', 'Amplitude of 1st Gaussian',
'Mean of 2nd Gaussian', 'Standard deviation of 2nd Gaussian', 'Amplitude of 2nd Gaussian'),
units = ('nm','nm','','nm','nm',''),
start = np.asarray([2.5, 0.2, 0.5, 3.5, 0.2, 0.5]),
lower = np.asarray([1, 0.05, 0, 1, 0.05, 0]),
upper = np.asarray([20, 2.5, 1, 20, 2.5, 1]))
@docstring()
def dd_gauss2(r,param):
r"""
Sum of two Gaussian distributions
Notes
-----
**Model:**
:math:`P(r) = a_1\sqrt{\frac{2}{\pi}}\frac{1}{\sigma_1}\exp\left(-\frac{(r-\left<r_1\right>)^2}{\sigma_1^2}\right) + a_2\sqrt{\frac{2}{\pi}}\frac{1}{\sigma_2}\exp\left(-\frac{(r-\left<r_2\right>)^2}{\sigma_2^2}\right)`
============== ========================= ============= ============= ============= ======================================
Variable Symbol Start Value Lower bound Upper bound Description
============== ========================= ============= ============= ============= ======================================
``param[0]`` :math:`\left<r_1\right>` 2.5 1.0 20 1st Gaussian mean distance (nm)
``param[1]`` :math:`\sigma_1` 0.2 0.05 2.5 1st Gaussian standard deviation (nm)
``param[2]`` :math:`a_1` 0.5 0 1 1st Gaussian amplitude
``param[3]`` :math:`\left<r_2\right>` 3.5 1.0 20 2nd Gaussian mean distance (nm)
``param[4]`` :math:`\sigma_2` 0.2 0.05 2.5 2nd Gaussian standard deviation (nm)
``param[5]`` :math:`a_2` 0.5 0 1 2nd Gaussian amplitude
============== ========================= ============= ============= ============= ======================================
"""
r,param = _parsparam(r,param,npar=6)
r0 = [param[0], param[3]]
sigma = [param[1], param[4]]
a = [param[2], param[5]]
P = _multigaussfun(r,r0,sigma,a)
return P
# =================================================================
# =================================================================
@metadata(
parameters = ('Mean of 1st Gaussian', 'Standard deviation of 1st Gaussian', 'Amplitude of 1st Gaussian',
'Mean of 2nd Gaussian', 'Standard deviation of 2nd Gaussian', 'Amplitude of 2nd Gaussian',
'Mean of 3rd Gaussian', 'Standard deviation of 3rd Gaussian', 'Amplitude of 3rd Gaussian'),
units = ('nm','nm','','nm','nm','','nm','nm',''),
start = np.asarray([2.5, 0.2, 0.3, 3.5, 0.2, 0.3, 5, 0.2, 0.3]),
lower = np.asarray([1, 0.05, 0, 1, 0.05, 0, 1, 0.05, 0]),
"""rq_import.py: Module to import a nexus corp grid & properties, or vdb, or vdb ensemble into resqml format."""
version = '25th May 2021'
# Nexus is a registered trademark of the Halliburton Company
import logging
log = logging.getLogger(__name__)
log.debug('rq_import.py version ' + version)
import os
import numpy as np
import numpy.ma as ma
import glob
import resqpy.olio.load_data as ld
# import resqpy.olio.grid_functions as gf
import resqpy.olio.write_data as wd
import resqpy.olio.ab_toolbox as abt
import resqpy.olio.uuid as bu
import resqpy.olio.weights_and_measures as bwam
import resqpy.olio.xml_et as rqet
import resqpy.olio.vdb as vdb
import resqpy.olio.vector_utilities as vec
import resqpy.olio.trademark as tm
import resqpy.model as rq
import resqpy.crs as rqc
import resqpy.grid as grr
import resqpy.property as rp
import resqpy.time_series as rts
import resqpy.surface as rqs
import resqpy.organize as rqo
def import_nexus(resqml_file_root, # output path and file name without .epc or .h5 extension
extent_ijk = None, # 3 element numpy vector
vdb_file = None, # vdb input file: either this or corp_file should be not None
vdb_case = None, # if None, first case in vdb is used (usually a vdb only holds one case)
corp_file = None, # corp ascii input file: nexus corp data without keyword
corp_bin_file = None, # corp binary file: nexus corp data in bespoke binary format
corp_xy_units = 'm',
corp_z_units = 'm',
corp_z_inc_down = True,
ijk_handedness = 'right',
corp_eight_mode = False,
geometry_defined_everywhere = True,
treat_as_nan = None,
active_mask_file = None,
use_binary = False, # this refers to pure binary arrays, not corp bin format
resqml_xy_units = 'm',
resqml_z_units = 'm',
resqml_z_inc_down = True,
shift_to_local = False,
local_origin_place = 'centre', # 'centre' or 'minimum'
max_z_void = 0.1, # vertical gaps greater than this will introduce k gaps into resqml grid
split_pillars = True,
split_tolerance = 0.01, # applies to each of x, y, z differences
property_array_files = None, # actually, list of (filename, keyword, uom, time_index, null_value, discrete)
summary_file = None, # used to extract timestep dates when loading recurrent data from vdb
vdb_static_properties = True, # if True, static vdb properties are imported (only relevant if vdb_file is not None)
vdb_recurrent_properties = False,
timestep_selection = 'all', # 'first', 'last', 'first and last', 'all', or list of ints being reporting timestep numbers
use_compressed_time_series = True,
decoarsen = True, # where ICOARSE is present, redistribute data to uncoarse cells
ab_property_list = None, # list of (file_name, keyword, property_kind, facet_type, facet, uom, time_index, null_value, discrete)
create_property_set = False,
ensemble_case_dirs_root = None, # path upto but excluding realisation number
ensemble_property_dictionary = None, # dictionary mapping title (or keyword) to (filename, property_kind, facet_type, facet,
# uom, time_index, null_value, discrete)
ensemble_size_limit = None,
grid_title = 'ROOT',
mode = 'w',
progress_fn = None):
"""Read a simulation grid geometry and optionally grid properties and return a resqml model in memory & written to disc.
Input may be from nexus ascii input files, or nexus vdb output.
"""
if resqml_file_root.endswith('.epc'): resqml_file_root = resqml_file_root[:-4]
assert mode in ['w', 'a']
if vdb_file:
using_vdb = True
corp_file = corp_bin_file = None
grid_title = grid_title.upper()
log.info('starting import of Nexus ' + str(grid_title) + ' corp from vdb ' + str(vdb_file))
tm.log_nexus_tm('info')
vdbase = vdb.VDB(vdb_file)
case_list = vdbase.cases()
assert len(case_list) > 0, 'no cases found in vdb'
if vdb_case is None:
vdb_case = case_list[0]
else:
assert vdb_case in case_list, 'case ' + vdb_case + ' not found in vdb: ' + vdb_file
vdbase.set_use_case(vdb_case)
assert grid_title in vdbase.list_of_grids(), 'grid ' + str(grid_title) + ' not found in vdb'
if extent_ijk is not None:
vdbase.set_extent_kji(tuple(reversed(extent_ijk)))
log.debug('using case ' + vdb_case + ' and grid ' + grid_title + ' from vdb')
if vdb_recurrent_properties and not summary_file:
if vdb_file.endswith('.vdb.zip'):
summary_file = vdb_file[:-8] + '.sum'
elif vdb_file.endswith('.vdb') or vdb_file.endswith('.zip'):
summary_file = vdb_file[:-4] + '.sum'
else:
sep = vdb_file.rfind(os.sep)
dot = vdb_file[sep + 1:].find('.')
if dot > 0:
summary_file = vdb_file[:sep + 1 + dot] + '.sum'
else:
summary_file = vdb_file + '.sum'
cp_array = vdbase.grid_corp(grid_title)
cp_extent_kji = cp_array.shape[:3]
if cp_extent_kji[:2] == (1, 1): # auto determination of extent failed
assert extent_ijk is not None, 'failed to determine extent of grid from corp data'
(ni, nj, nk) = extent_ijk
assert cp_extent_kji[2] == ni * nj * nk, 'number of cells in grid corp does not match extent'
cp_extent = (nk, nj, ni, 2, 2, 2, 3) # (nk, nj, ni, kp, jp, ip, xyz)
cp_array = cp_array.reshape(cp_extent)
elif extent_ijk is not None:
for axis in range(3):
assert cp_extent_kji[axis] == extent_ijk[2 - axis], 'extent of grid corp data from vdb does not match that supplied'
elif corp_file or corp_bin_file:
if corp_bin_file: corp_file = None
using_vdb = False
# geometry_defined_everywhere = (active_mask_file is None)
log.info('starting import of Nexus corp file ' + str(corp_file if corp_file else corp_bin_file))
tm.log_nexus_tm('info')
if extent_ijk is None: # auto detect extent
extent_kji = None
cp_extent = None
else:
(ni, nj, nk) = extent_ijk
extent_kji = np.array((nk, nj, ni), dtype = 'int')
cp_extent = (nk, nj, ni, 2, 2, 2, 3) # (nk, nj, ni, kp, jp, ip, xyz)
log.debug('reading and resequencing corp data')
if corp_bin_file: # bespoke nexus corp bin format, not to be confused with pure binary files used below
cp_array = ld.load_corp_array_from_file(corp_bin_file, extent_kji, corp_bin = True,
comment_char = None, # comment char will be detected automatically
data_free_of_comments = False,
use_binary = use_binary)
else:
cp_binary_file = abt.cp_binary_filename(corp_file, nexus_ordering = False) # pure binary, not bespoke corp bin used above
recent_binary_exists = ld.file_exists(cp_binary_file, must_be_more_recent_than_file = corp_file)
cp_array = None
if use_binary and (extent_ijk is not None) and recent_binary_exists:
try:
cp_array = ld.load_array_from_file(cp_binary_file, cp_extent, use_binary = True)
except:
cp_array = None
if cp_array is None:
cp_array = ld.load_corp_array_from_file(corp_file, extent_kji, corp_bin = False,
comment_char = None, # comment char will be detected automatically
data_free_of_comments = False,
use_binary = use_binary)
if use_binary:
wd.write_pure_binary_data(cp_binary_file, cp_array) # NB: this binary file is resequenced, not in nexus ordering!
else:
raise ValueError('vdb_file and corp_file are both None in import_nexus() call')
if cp_array is None:
log.error('failed to create corner point array')
return None
if extent_ijk is None:
cp_extent = cp_array.shape
extent_kji = cp_extent[:3]
(nk, nj, ni) = extent_kji
extent_ijk = (ni, nj, nk)
else:
ni, nj, nk = extent_ijk
# convert units
log.debug('Converting units')
if corp_xy_units == corp_z_units and resqml_xy_units == resqml_z_units:
bwam.convert_lengths(cp_array, corp_xy_units, resqml_xy_units)
else:
bwam.convert_lengths(cp_array[:, :, :, :, :, :, 0:1], corp_xy_units, resqml_xy_units)
bwam.convert_lengths(cp_array[:, :, :, :, :, :, 2], corp_z_units, resqml_z_units)
# invert z if required
if resqml_z_inc_down != corp_z_inc_down:
log.debug('Inverting z values')
inversion = np.negative(cp_array[:, :, :, :, :, :, 2])
cp_array[:, :, :, :, :, :, 2] = inversion
# read active cell mask
log.debug('Setting up active cell mask')
active_mask = inactive_mask = None
if vdb_file:
assert vdbase is not None, 'problem with vdb object'
inactive_mask = vdbase.grid_kid_inactive_mask(grid_title) # TODO: check conversion of KID to boolean for LGRs
if inactive_mask is not None:
log.debug('using kid array as inactive cell mask')
active_mask = np.logical_not(inactive_mask)
else:
log.warning('kid array not found, using unpack array as active cell indicator')
unp = vdbase.grid_unpack(grid_title)
assert unp is not None, 'failed to load active cell indicator mask from vdb kid or unpack arrays'
active_mask = np.empty((nk, nj, ni), dtype = 'bool')
active_mask[:] = (unp > 0)
inactive_mask = np.logical_not(active_mask)
elif active_mask_file:
active_mask = ld.load_array_from_file(active_mask_file, extent_kji, data_type = 'bool', use_binary = use_binary)
if active_mask is None:
log.error('failed to load active cell indicator array from file: ' + active_mask_file)
else:
inactive_mask = np.logical_not(active_mask) # will crash if active mask load failed
# shift grid geometry to local crs
local_origin = np.zeros(3)
if shift_to_local:
log.debug('shifting to local origin at ' + local_origin_place)
if local_origin_place == 'centre':
local_origin = np.nanmean(cp_array, axis = (0, 1, 2, 3, 4, 5))
elif local_origin_place == 'minimum':
local_origin = np.nanmin(cp_array, axis = (0, 1, 2, 3, 4, 5)) - 1.0 # The -1 ensures all coords are >0
else:
assert(False)
cp_array -= local_origin
# create empty resqml model
log.debug('creating an empty resqml model')
if mode == 'w':
model = rq.Model(resqml_file_root, new_epc = True, create_basics = True, create_hdf5_ext = True)
else:
model = rq.Model(resqml_file_root)
assert model is not None
ext_uuid = model.h5_uuid()
assert ext_uuid is not None
# create coordinate reference system (crs) in model and set references in grid object
log.debug('creating coordinate reference system')
crs_roots = model.roots(obj_type = 'LocalDepth3dCrs')
if mode == 'w' or len(crs_roots) == 0:
crs_node = model.create_crs(add_as_part = True,
x_offset = local_origin[0], y_offset = local_origin[1], z_offset = local_origin[2],
xy_units = resqml_xy_units, z_units = resqml_z_units,
z_inc_down = resqml_z_inc_down)
crs_uuid = bu.uuid_from_string(crs_node.attrib['uuid'])
else:
new_crs = rqc.Crs(model, x_offset = local_origin[0], y_offset = local_origin[1], z_offset = local_origin[2],
xy_units = resqml_xy_units, z_units = resqml_z_units, z_inc_down = resqml_z_inc_down)
crs_uuid = None
for crs_root in crs_roots:
existing_crs = rqc.Crs(model, crs_root = crs_root)
if new_crs.is_equivalent(existing_crs):
crs_uuid = existing_crs.uuid
break
if crs_uuid is None:
new_crs.create_xml()
crs_uuid = new_crs.uuid
grid = grid_from_cp(model, cp_array, crs_uuid,
active_mask = active_mask,
geometry_defined_everywhere = geometry_defined_everywhere,
treat_as_nan = treat_as_nan,
max_z_void = max_z_void,
split_pillars = split_pillars, split_tolerance = split_tolerance,
ijk_handedness = ijk_handedness,
known_to_be_straight = False)
# create hdf5 file using arrays cached in grid above
log.info('writing grid geometry to hdf5 file ' + resqml_file_root + '.h5')
grid.write_hdf5_from_caches(resqml_file_root + '.h5', mode = mode, write_active = False)
# build xml for grid geometry
log.debug('building xml for grid')
ijk_node = grid.create_xml(ext_uuid = None, title = grid_title, add_as_part = True, add_relationships = True)
assert ijk_node is not None, 'failed to create IjkGrid node in xml tree'
# import property arrays into a collection
prop_import_collection = None
decoarsen_array = None
ts_node = None
ts_uuid = None
if active_mask is None and grid.inactive is not None: active_mask = np.logical_not(grid.inactive)
if using_vdb:
prop_import_collection = rp.GridPropertyCollection()
if vdb_static_properties:
props = vdbase.grid_list_of_static_properties(grid_title)
if len(props) > 0:
prop_import_collection = rp.GridPropertyCollection()
prop_import_collection.set_grid(grid)
for keyword in props:
prop_import_collection.import_vdb_static_property_to_cache(vdbase, keyword, grid_name = grid_title)
# if active_mask is not None:
# prop_import_collection.add_cached_array_to_imported_list(active_mask, active_mask_file, 'ACTIVE', property_kind = 'active',
# discrete = True, uom = None, time_index = None, null_value = None)
elif property_array_files is not None and len(property_array_files) > 0:
prop_import_collection = rp.GridPropertyCollection()
prop_import_collection.set_grid(grid)
for (p_filename, p_keyword, p_uom, p_time_index, p_null_value, p_discrete) in property_array_files:
prop_import_collection.import_nexus_property_to_cache(p_filename, p_keyword, grid.extent_kji, discrete = p_discrete,
uom = p_uom, time_index = p_time_index,
null_value = p_null_value, use_binary = use_binary)
# if active_mask is not None:
# prop_import_collection.add_cached_array_to_imported_list(active_mask, active_mask_file, 'ACTIVE', property_kind = 'active',
# discrete = True, uom = None, time_index = None, null_value = None)
# ab_property_list: list of (filename, keyword, property_kind, facet_type, facet, uom, time_index, null_value, discrete)
elif ab_property_list is not None and len(ab_property_list) > 0:
prop_import_collection = rp.GridPropertyCollection()
prop_import_collection.set_grid(grid)
for (p_filename, p_keyword, p_property_kind, p_facet_type, p_facet, p_uom, p_time_index, p_null_value, p_discrete) in ab_property_list:
prop_import_collection.import_ab_property_to_cache(p_filename, p_keyword, grid.extent_kji, discrete = p_discrete,
property_kind = p_property_kind, facet_type = p_facet_type, facet = p_facet,
uom = p_uom, time_index = p_time_index,
null_value = p_null_value)
# if active_mask is not None:
# prop_import_collection.add_cached_array_to_imported_list(active_mask, active_mask_file, 'ACTIVE', property_kind = 'active',
# discrete = True, uom = None, time_index = None, null_value = None)
# ensemble_property_dictionary: mapping title (or keyword) to
# (filename, property_kind, facet_type, facet, uom, time_index, null_value, discrete)
elif ensemble_case_dirs_root and ensemble_property_dictionary:
case_path_list = glob.glob(ensemble_case_dirs_root + '*')
assert len(case_path_list) > 0, 'no case directories found with path starting: ' + str(ensemble_case_dirs_root)
case_number_place = len(ensemble_case_dirs_root)
case_zero_used = False
case_count = 0
for case_path in case_path_list:
if ensemble_size_limit is not None and case_count >= ensemble_size_limit:
log.warning('stopping after reaching ensemble size limit')
break
# NB. import each case individually rather than holding property arrays for whole ensemble in memory at once
prop_import_collection = rp.GridPropertyCollection()
prop_import_collection.set_grid(grid)
tail = case_path[case_number_place:]
try:
case_number = int(tail)
assert case_number >= 0, 'negative case number encountered'
if case_number == 0:
assert not case_zero_used, 'more than one case number evaluated to zero'
case_zero_used = True
except:
log.error('failed to determine case number for tail: ' + str(tail))
continue
for keyword in ensemble_property_dictionary.keys():
(filename, p_property_kind, p_facet_type, p_facet, p_uom, p_time_index, p_null_value, p_discrete) = ensemble_property_dictionary[keyword]
p_filename = os.path.join(case_path, filename)
if not os.path.exists(p_filename):
log.error('missing property file: ' + p_filename)
continue
prop_import_collection.import_nexus_property_to_cache(p_filename, keyword, grid.extent_kji, discrete = p_discrete,
uom = p_uom, time_index = p_time_index, null_value = p_null_value,
property_kind = p_property_kind, facet_type = p_facet_type, facet = p_facet,
realization = case_number, use_binary = False)
if len(prop_import_collection.imported_list) > 0:
# create hdf5 file using arrays cached in grid above
log.info('writing properties to hdf5 file ' + str(resqml_file_root) + '.h5 for case: ' + str(case_number))
grid.write_hdf5_from_caches(resqml_file_root + '.h5', geometry = False, imported_properties = prop_import_collection, write_active = False)
# add imported properties parts to model, building property parts list
prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid, time_series_uuid = ts_uuid)
if create_property_set:
prop_import_collection.create_property_set_xml('realisation ' + str(case_number))
case_count += 1
# remove cached static property arrays from memory
# prop_import_collection.remove_all_cached_arrays()
del prop_import_collection
prop_import_collection = None
log.info(f'Nexus ascii ensemble input processed {case_count} cases')
tm.log_nexus_tm('info')
# create hdf5 file using arrays cached in grid above
if prop_import_collection is not None and len(prop_import_collection.imported_list) > 0:
if decoarsen:
decoarsen_array = prop_import_collection.decoarsen_imported_list()
if decoarsen_array is not None:
log.info('static properties decoarsened')
prop_import_collection.add_cached_array_to_imported_list(decoarsen_array, 'decoarsen', 'DECOARSEN', discrete = True,
uom = None, time_index = None, null_value = -1,
property_kind = 'discrete')
log.info('writing ' + str(len(prop_import_collection.imported_list)) + ' properties to hdf5 file ' + resqml_file_root + '.h5')
elif not ensemble_case_dirs_root:
log.info('no static grid properties to import')
prop_import_collection = None
grid.write_hdf5_from_caches(resqml_file_root + '.h5', geometry = False, imported_properties = prop_import_collection, write_active = True)
# remove cached static property arrays from memory
if prop_import_collection is not None:
prop_import_collection.remove_all_cached_arrays()
ts_selection = None
if using_vdb and vdb_recurrent_properties and timestep_selection is not None and str(timestep_selection) != 'none':
if prop_import_collection is None:
prop_import_collection = rp.GridPropertyCollection()
prop_import_collection.set_grid(grid)
        # extract timestep dates from summary file (this info might be hidden in the recurrent binary files but I couldn't find it)
# todo: create cut down time series from recurrent files and differentiate between reporting time index and mapped time step number
full_time_series = rts.time_series_from_nexus_summary(summary_file)
if full_time_series is None:
log.error('failed to fetch time series from Nexus summary file; recurrent data excluded')
tm.log_nexus_tm('error')
else:
full_time_series.set_model(model)
timestep_list = vdbase.grid_list_of_timesteps(grid_title) # get list of timesteps for which recurrent files exist
recur_time_series = None
for timestep_number in timestep_list:
if isinstance(timestep_selection, list):
if timestep_number not in timestep_selection: continue
else:
if timestep_selection == 'first':
if timestep_number != timestep_list[0]: break
elif timestep_selection == 'last':
if timestep_number != timestep_list[-1]: continue
elif timestep_selection == 'first and last':
if timestep_number != timestep_list[0] and timestep_number != timestep_list[-1]: continue
# default to importing all timesteps
stamp = full_time_series.timestamp(timestep_number)
if stamp is None:
log.error('timestamp number for which recurrent data exists was not found in summary file: ' + str(timestep_number))
continue
recur_prop_list = vdbase.grid_list_of_recurrent_properties(grid_title, timestep_number)
common_recur_prop_set = set()
if recur_time_series is None:
recur_time_series = rts.TimeSeries(model, extract_from_xml = False, first_timestamp = stamp)
if recur_prop_list is not None: common_recur_prop_set = set(recur_prop_list)
else:
recur_time_series.add_timestamp(stamp)
if recur_prop_list is not None: common_recur_prop_set = common_recur_prop_set.intersection(set(recur_prop_list))
step_import_collection = rp.GridPropertyCollection()
step_import_collection.set_grid(grid)
# for each property for this timestep, cache array and add to recur prop import collection for this time step
if recur_prop_list:
for keyword in recur_prop_list:
if not keyword or not keyword.isalnum(): continue
step_import_collection.import_vdb_recurrent_property_to_cache(vdbase, timestep_number, keyword, grid_name = grid_title)
# extend hdf5 with cached arrays for this timestep
log.info('number of recurrent grid property arrays for timestep: ' + str(timestep_number) +
' is: ' + str(step_import_collection.number_of_imports()))
if decoarsen_array is not None:
log.info('decoarsening recurrent properties for timestep: ' + str(timestep_number))
step_import_collection.decoarsen_imported_list(decoarsen_array = decoarsen_array)
log.info('extending hdf5 file with recurrent properties for timestep: ' + str(timestep_number))
grid.write_hdf5_from_caches(resqml_file_root + '.h5', mode = 'a', geometry = False,
imported_properties = step_import_collection, write_active = False)
# add imported list for this timestep to full imported list
prop_import_collection.inherit_imported_list_from_other_collection(step_import_collection)
log.debug('total number of property arrays after timestep: ' + str(timestep_number) +
' is: ' + str(prop_import_collection.number_of_imports()))
# remove cached copies of arrays
step_import_collection.remove_all_cached_arrays()
ts_node = full_time_series.create_xml(title = 'simulator full timestep series')
model.time_series = ts_node # save as the primary time series for the model
ts_uuid = rqet.uuid_for_part_root(ts_node)
# create xml for recur_time_series (as well as for full_time_series) and add as part; not needed?
if recur_time_series is not None:
rts_node = recur_time_series.create_xml(title = 'simulator recurrent array timestep series')
if use_compressed_time_series:
ts_uuid = rqet.uuid_for_part_root(rts_node)
ts_selection = timestep_list
# add imported properties parts to model, building property parts list
if prop_import_collection is not None and prop_import_collection.imported_list is not None:
prop_import_collection.set_grid(grid) # update to pick up on recently created xml root node for grid
prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid, time_series_uuid = ts_uuid,
selected_time_indices_list = ts_selection)
if create_property_set:
prop_import_collection.create_property_set_xml('property set for import for grid ' + str(grid_title))
# mark model as modified (will already have happened anyway)
model.set_modified()
# create epc file
log.info('storing model in epc file ' + resqml_file_root + '.epc')
model.store_epc(resqml_file_root + '.epc')
# return resqml model
return model
def import_vdb_all_grids(resqml_file_root, # output path and file name without .epc or .h5 extension
extent_ijk = None, # 3 element numpy vector applicable to ROOT
vdb_file = None,
vdb_case = None, # if None, first case in vdb is used (usually a vdb only holds one case)
corp_xy_units = 'm',
corp_z_units = 'm',
corp_z_inc_down = True,
ijk_handedness = 'right',
geometry_defined_everywhere = True,
treat_as_nan = None,
resqml_xy_units = 'm',
resqml_z_units = 'm',
resqml_z_inc_down = True,
shift_to_local = False,
local_origin_place = 'centre', # 'centre' or 'minimum'
                         max_z_void = 0.1, # vertical gaps greater than this will introduce k gaps into resqml grid
split_pillars = True,
split_tolerance = 0.01, # applies to each of x, y, z differences
vdb_static_properties = True, # if True, static vdb properties are imported (only relevant if vdb_file is not None)
vdb_recurrent_properties = False,
decoarsen = True,
timestep_selection = 'all', # 'first', 'last', 'first and last', 'all', or list of ints being reporting timestep numbers
create_property_set = False):
"""Creates a RESQML dataset containing grids and grid properties, including LGRs, for a single realisation."""
vdbase = vdb.VDB(vdb_file)
case_list = vdbase.cases()
assert len(case_list) > 0, 'no cases found in vdb'
if vdb_case is None:
vdb_case = case_list[0]
else:
assert vdb_case in case_list, 'case ' + vdb_case + ' not found in vdb: ' + vdb_file
vdbase.set_use_case(vdb_case)
grid_list = vdbase.list_of_grids()
index = 0
for grid_name in grid_list:
if grid_name.upper().startswith('SMALLGRIDS'):
log.warning('vdb import skipping small grids')
continue
log.debug('importing vdb data for grid ' + str(grid_name))
import_nexus(resqml_file_root,
extent_ijk = extent_ijk if grid_name == 'ROOT' else None, # 3 element numpy vector applicable to ROOT
vdb_file = vdb_file,
vdb_case = vdb_case, # if None, first case in vdb is used (usually a vdb only holds one case)
corp_xy_units = corp_xy_units,
corp_z_units = corp_z_units,
corp_z_inc_down = corp_z_inc_down,
ijk_handedness = ijk_handedness,
geometry_defined_everywhere = geometry_defined_everywhere,
treat_as_nan = treat_as_nan,
resqml_xy_units = resqml_xy_units,
resqml_z_units = resqml_z_units,
resqml_z_inc_down = resqml_z_inc_down,
shift_to_local = shift_to_local,
local_origin_place = local_origin_place, # 'centre' or 'minimum'
                   max_z_void = max_z_void, # vertical gaps greater than this will introduce k gaps into resqml grid
split_pillars = split_pillars, # NB: some LGRs may be unsplit even if ROOT is split
split_tolerance = split_tolerance, # applies to each of x, y, z differences
vdb_static_properties = vdb_static_properties, # if True, static vdb properties are imported
vdb_recurrent_properties = vdb_recurrent_properties,
decoarsen = decoarsen,
timestep_selection = timestep_selection,
create_property_set = create_property_set,
grid_title = grid_name,
mode = 'w' if index == 0 else 'a')
index += 1
def import_vdb_ensemble(epc_file,
ensemble_run_dir,
existing_epc = False,
keyword_list = None,
property_kind_list = None,
vdb_static_properties = True, # if True, static vdb properties are imported
vdb_recurrent_properties = True,
decoarsen = True,
timestep_selection = 'all',
create_property_set_per_realization = True,
create_property_set_per_timestep = True,
create_complete_property_set = False,
# remaining arguments only used if existing_epc is False
extent_ijk = None, # 3 element numpy vector
corp_xy_units = 'metres',
corp_z_units = 'metres',
corp_z_inc_down = True,
ijk_handedness = 'right',
geometry_defined_everywhere = True,
treat_as_nan = None,
resqml_xy_units = 'metres',
resqml_z_units = 'metres',
resqml_z_inc_down = True,
shift_to_local = True,
local_origin_place = 'centre', # 'centre' or 'minimum'
max_z_void = 0.1, # import will fail if vertical void greater than this is encountered
split_pillars = True,
split_tolerance = 0.01, # applies to each of x, y, z differences
progress_fn = None):
"""Adds properties from all vdb's within an ensemble directory tree to a single RESQML dataset, referencing a shared grid.
args:
epc_file (string): filename of epc file to be extended with ensemble properties
ensemble_run_dir (string): path of main ensemble run directory; vdb's within this directory tree are source of import
existing_epc (boolean, default False): if True, the epc_file must already exist and contain the compatible grid
keyword_list (list of strings, optional): if present, only properties for keywords within the list are included
property_kind_list (list of strings, optional): if present, only properties which are mapped to these resqml property
kinds are included in the import
vdb_static_properties (boolean, default True): if False, no static properties are included, regardless of keyword and/or
property kind matches
vdb_recurrent_properties (boolean, default True): if False, no recurrent properties are included, regardless of keyword
and/or property kind matches
decoarsen (boolean, default True): if True and ICOARSE property exists for a grid in a case, the associated property
data is decoarsened; if False, the property data is as stored in the vdb
timestep_selection (string, default 'all'): may be 'first', 'last', 'first and last', or 'all', controlling which
reporting timesteps are included when loading recurrent data
create_property_set_per_realization (boolean, default True): if True, a property set object is created for each realization
create_property_set_per_timestep (boolean, default True): if True, a property set object is created for each timestep
included in the recurrent data import
create_complete_property_set (boolean, default False): if True, a property set object is created containing all the
properties imported; only really useful to differentiate from other properties related to the grid
extent_ijk (triple int, optional): this and remaining arguments are only used if existing_epc is False; the extent
is only needed in case automatic determination of the extent fails
corp_xy_units (string, default 'metres'): the units of x & y values in the vdb corp data; should be 'metres' or 'feet'
corp_z_units (string, default 'metres'): the units of z values in the vdb corp data; should be 'metres' or 'feet'
corp_z_inc_down (boolean, default True): set to True if corp z values are depth; False if elevation
ijk_handedness (string, default 'right'): set to the handedness of the IJK axes in the Nexus model; 'right' or 'left'
geometry_defined_everywhere (boolean, default True): set to False if inactive cells do not have valid geometry;
deprecated - use treat_as_nan argument instead
treat_as_nan (string, optional): if not None, one of 'dots', 'ij_dots', 'inactive'; controls which inactive cells
have their geometry set to undefined
resqml_xy_units (string, default 'metres'): the units of x & y values to use in the generated resqml grid;
should be 'metres' or 'feet'
resqml_z_units (string, default 'metres'): the units of z values to use in the generated resqml grid;
should be 'metres' or 'feet'
resqml_z_inc_down (boolean, default True): set to True if resqml z values are to be depth; False for elevations
shift_to_local (boolean, default True): if True, the resqml coordinate reference system will use a local origin
local_origin_place (string, default 'centre'): where to place the local origin; 'centre' or 'minimum'; only
relevant if shift_to_local is True
max_z_void (float, default 0.1): the tolerance of voids between layers, in z direction; voids greater than this
will cause the grid import to fail
split_pillars (boolean, default True): if False, a grid is generated without split pillars
split_tolerance (float, default 0.01): the tolerance applied to each of x, y, & z values, beyond which a corner
point (and hence pillar) will be split
progress_fn (function(float), optional): if present, this function is called at intervals during processing; it
must accept one floating point argument which will range from 0.0 to 1.0
returns:
resqpy.Model object containing properties for all the realisations; hdf5 and epc files having been updated
note:
if existing_epc is True, the epc file must already exist and contain one grid (or one grid named ROOT) which must
have the correct extent for all realisations within the ensemble; if existing_epc is False, the resqml dataset is
created afresh with a grid extracted from the first realisation in the ensemble; either way, the single grid is used
as the representative grid in the ensemble resqml dataset being generated;
all vdb directories within the directory tree headed by ensemble_run_dir are included in the import; by
default all properties will be imported; the keyword_list, property_kind_list, vdb_static_properties,
vdb_recurrent_properties and timestep_selection arguments can be used to filter the required properties;
if both keyword_list and property_kind_list are provided, a property must match an item in both lists in order
to be included; if recurrent properties are being included then all vdb's should contain the same number of reporting
steps in their recurrent data and these should relate to the same set of timestamps; timestamp data is extracted from a
summary file for the first realisation; no check is made to ensure that reporting timesteps in different realisations
are actually for the same date.
"""
assert epc_file.endswith('.epc')
assert vdb_static_properties or vdb_recurrent_properties, 'no properties selected for ensemble import'
if progress_fn is not None: progress_fn(0.0)
# fetch a sorted list of the vdb paths found in the run directory tree
ensemble_list = vdb.ensemble_vdb_list(ensemble_run_dir)
if len(ensemble_list) == 0:
log.error("no vdb's found in run directory tree: " + str(ensemble_run_dir))
return None
if not existing_epc:
model = import_nexus(epc_file[:-4], # output path and file name without .epc or .h5 extension
extent_ijk = extent_ijk, # 3 element numpy vector, in case extent is not automatically determined
vdb_file = ensemble_list[0], # vdb input file
corp_xy_units = corp_xy_units,
corp_z_units = corp_z_units,
corp_z_inc_down = corp_z_inc_down,
ijk_handedness = ijk_handedness,
geometry_defined_everywhere = geometry_defined_everywhere,
treat_as_nan = treat_as_nan,
resqml_xy_units = resqml_xy_units,
resqml_z_units = resqml_z_units,
resqml_z_inc_down = resqml_z_inc_down,
shift_to_local = shift_to_local,
local_origin_place = local_origin_place, # 'centre' or 'minimum'
max_z_void = max_z_void, # import will fail if vertical void greater than this is encountered
split_pillars = split_pillars,
split_tolerance = split_tolerance, # applies to each of x, y, z differences
vdb_static_properties = False,
vdb_recurrent_properties = False,
create_property_set = False)
model = rq.Model(epc_file = epc_file) # shouldn't be necessary if just created but it feels safer to re-open the model
assert model is not None, 'failed to instantiate model'
grid = model.grid()
assert grid is not None, 'grid not found'
ext_uuid = model.h5_uuid()
assert ext_uuid is not None, 'failed to determine uuid for hdf5 file reference'
hdf5_file = model.h5_file_name(uuid = ext_uuid)
# create reporting timestep time series for recurrent data, if required, based on the first realisation
recur_time_series = None
recur_ts_uuid = None
timestep_list = None
if vdb_recurrent_properties:
summary_file = ensemble_list[0][:-4] + '.sum' # TODO: check timestep summary file extension, .tssum?
full_time_series = rts.time_series_from_nexus_summary(summary_file)
if full_time_series is None:
log.error('failed to extract info from timestep summary file; disabling recurrent property import')
vdb_recurrent_properties = False
if vdb_recurrent_properties:
vdbase = vdb.VDB(ensemble_list[0])
timestep_list = vdbase.list_of_timesteps()
if len(timestep_list) == 0:
log.warning('no ROOT recurrent data found in vdb for first realisation; disabling recurrent property import')
vdb_recurrent_properties = False
if vdb_recurrent_properties:
if timestep_selection == 'all' or ('first' in timestep_selection): fs_index = 0
else: fs_index = -1
first_stamp = full_time_series.timestamp(timestep_list[fs_index])
if first_stamp is None:
log.error('first timestamp number selected for import was not found in summary file: ' + str(timestep_list[fs_index]))
log.error('disabling recurrent property import')
vdb_recurrent_properties = False
if vdb_recurrent_properties:
recur_time_series = rts.TimeSeries(model, extract_from_xml = False, first_timestamp = first_stamp)
if timestep_selection == 'all': remaining_list = timestep_list[1:]
elif timestep_selection == 'first and last': remaining_list = [timestep_list[-1]]
else: remaining_list = []
for timestep_number in remaining_list:
stamp = full_time_series.timestamp(timestep_number)
if stamp is None:
log.error('timestamp number for which recurrent data exists was not found in summary file: ' + str(timestep_number))
log.error('disabling recurrent property import')
vdb_recurrent_properties = False
recur_time_series = None
break
recur_time_series.add_timestamp(stamp)
if recur_time_series is not None:
recur_ts_node = recur_time_series.create_xml(title = 'simulator recurrent array timestep series')
recur_ts_uuid = rqet.uuid_for_part_root(recur_ts_node)
model.time_series = recur_ts_node # save as the primary time series for the model
if create_complete_property_set or create_property_set_per_timestep:
complete_collection = rp.GridPropertyCollection()
complete_collection.set_grid(grid)
else:
complete_collection = None
# main loop over realisations
for realisation in range(len(ensemble_list)):
if progress_fn is not None: progress_fn(float(1 + realisation) / float(1 + len(ensemble_list)))
vdb_file = ensemble_list[realisation]
log.info('processing realisation ' + str(realisation) + ' from: ' + str(vdb_file))
vdbase = vdb.VDB(vdb_file)
# case_list = vdbase.cases()
# assert len(case_list) > 0, 'no cases found in vdb: ' + str(vdb_file)
# if len(case_list) > 1: log.warning('more than one case found in vdb (using first): ' + str(vdb_file))
# vdb_case = case_list[0]
# vdbase.set_use_case(vdb_case)
vdbase.set_extent_kji(grid.extent_kji)
prop_import_collection = rp.GridPropertyCollection(realization = realisation)
prop_import_collection.set_grid(grid)
decoarsen_array = None
if vdb_static_properties:
props = vdbase.list_of_static_properties()
if len(props) > 0:
for keyword in props:
if keyword_list is not None and keyword not in keyword_list: continue
if property_kind_list is not None:
prop_kind, _, _ = rp.property_kind_and_facet_from_keyword(keyword)
if prop_kind not in property_kind_list and prop_kind not in ['active', 'region initialization']: continue
prop_import_collection.import_vdb_static_property_to_cache(vdbase, keyword, realization = realisation)
if decoarsen:
decoarsen_array = prop_import_collection.decoarsen_imported_list()
if decoarsen_array is not None: log.debug('static properties decoarsened for realisation ' + str(realisation))
grid.write_hdf5_from_caches(hdf5_file, mode = 'a', geometry = False,
imported_properties = prop_import_collection, write_active = False)
prop_import_collection.remove_all_cached_arrays()
if vdb_recurrent_properties:
r_timestep_list = vdbase.list_of_timesteps() # get list of timesteps for which recurrent files exist
if len(r_timestep_list) < recur_time_series.number_of_timestamps():
log.error('insufficient number of reporting timesteps; skipping recurrent data for realisation ' + str(realisation))
else:
common_recur_prop_set = None
for tni in range(recur_time_series.number_of_timestamps()):
if timestep_selection in ['all', 'first']:
timestep_number = timestep_list[tni]
r_timestep_number = r_timestep_list[tni]
elif timestep_selection == 'last' or tni > 0:
timestep_number = timestep_list[-1]
r_timestep_number = r_timestep_list[-1]
else:
timestep_number = timestep_list[0]
r_timestep_number = r_timestep_list[0]
stamp = full_time_series.timestamp(timestep_number)
recur_prop_list = vdbase.list_of_recurrent_properties(r_timestep_number)
if common_recur_prop_set is None: common_recur_prop_set = set(recur_prop_list)
elif recur_prop_list is not None: common_recur_prop_set = common_recur_prop_set.intersection(set(recur_prop_list))
step_import_collection = rp.GridPropertyCollection()
step_import_collection.set_grid(grid)
# for each property for this timestep, cache array and add to recur prop import collection for this time step
if recur_prop_list:
for keyword in recur_prop_list:
if not keyword or not keyword.isalnum(): continue
if keyword_list is not None and keyword not in keyword_list: continue
if property_kind_list is not None:
prop_kind, _, _ = rp.property_kind_and_facet_from_keyword(keyword)
if prop_kind not in property_kind_list: continue
step_import_collection.import_vdb_recurrent_property_to_cache(vdbase, r_timestep_number, keyword,
time_index = tni, # index into recur_time_series
realization = realisation)
if decoarsen_array is not None:
step_import_collection.decoarsen_imported_list(decoarsen_array = decoarsen_array)
# extend hdf5 with cached arrays for this timestep
# log.info('number of recurrent grid property arrays for timestep: ' + str(timestep_number) +
# ' is: ' + str(step_import_collection.number_of_imports()))
# log.info('extending hdf5 file with recurrent properties for timestep: ' + str(timestep_number))
grid.write_hdf5_from_caches(hdf5_file, mode = 'a', geometry = False,
imported_properties = step_import_collection, write_active = False)
# add imported list for this timestep to full imported list
prop_import_collection.inherit_imported_list_from_other_collection(step_import_collection)
# log.debug('total number of property arrays after timestep: ' + str(timestep_number) +
# ' is: ' + str(prop_import_collection.number_of_imports()))
# remove cached copies of arrays
step_import_collection.remove_all_cached_arrays()
if len(prop_import_collection.imported_list) == 0:
log.warning('no properties imported for realisation ' + str(realisation))
continue
prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid, time_series_uuid = recur_ts_uuid)
if create_property_set_per_realization:
prop_import_collection.create_property_set_xml('property set for realization ' + str(realisation))
if complete_collection is not None:
complete_collection.inherit_parts_from_other_collection(prop_import_collection)
if complete_collection is not None:
if create_property_set_per_timestep and recur_time_series is not None:
for tni in range(recur_time_series.number_of_timestamps()):
ts_collection = rp.selective_version_of_collection(complete_collection, time_index = tni)
if ts_collection.number_of_parts() > 0:
ts_collection.create_property_set_xml('property set for time index ' + str(tni))
if create_complete_property_set:
complete_collection.create_property_set_xml('property set for ensemble vdb import')
# mark model as modified (will already have happened anyway)
model.set_modified()
# rewrite epc file
log.info('storing updated model in epc file ' + epc_file)
model.store_epc(epc_file)
if progress_fn is not None: progress_fn(1.0)
# return updated resqml model
return model
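# Example usage (illustrative sketch only; 'existing.epc' and '/sim/ensemble_run' are hypothetical
# paths and the keyword list is arbitrary — adjust to the actual dataset):
#
#   model = import_vdb_ensemble('existing.epc', '/sim/ensemble_run',
#                               existing_epc = True,
#                               keyword_list = ['SW', 'PRESSURE'],
#                               timestep_selection = 'first and last',
#                               create_property_set_per_realization = True)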
def add_ab_properties(epc_file, # existing resqml model
grid_uuid = None, # optional grid uuid, required if more than one grid in model; todo: handle list of grids?
ext_uuid = None, # if None, hdf5 file holding grid geometry will be used
ab_property_list = None): # list of (file_name, keyword, property_kind, facet_type, facet, uom, time_index, null_value,
# discrete, realization)
"""Process a list of pure binary property array files, adding as parts of model, related to grid (hdf5 file is appended to)."""
assert ab_property_list, 'property list is empty or missing'
model = rq.Model(epc_file = epc_file)
grid_node = model.root_for_ijk_grid(uuid = grid_uuid) # will raise an exception if uuid is None and Model has more than 1 grid
assert grid_node is not None, 'grid not found in model'
grid = grr.any_grid(parent_model = model, grid_root = grid_node, find_properties = False)
if ext_uuid is None:
ext_node = rqet.find_nested_tags(grid.geometry_root, ['Points', 'Coordinates', 'HdfProxy', 'UUID'])
if ext_node is not None: ext_uuid = bu.uuid_from_string(ext_node.text.strip())
# ab_property_list: list of (filename, keyword, property_kind, facet_type, facet, uom, time_index, null_value, discrete, realization)
prop_import_collection = rp.GridPropertyCollection()
prop_import_collection.set_grid(grid)
for (p_filename, p_keyword, p_property_kind, p_facet_type, p_facet, p_uom, p_time_index,
p_null_value, p_discrete, p_realization) in ab_property_list:
prop_import_collection.import_ab_property_to_cache(p_filename, p_keyword, grid.extent_kji, discrete = p_discrete,
uom = p_uom, time_index = p_time_index,
null_value = p_null_value,
property_kind = p_property_kind, facet_type = p_facet_type, facet = p_facet,
realization = p_realization)
# todo: property_kind, facet_type & facet are not currently getting passed through the imported_list tuple in resqml_property
if prop_import_collection is None:
log.warning('no pure binary grid properties to import')
else:
log.info('number of pure binary grid property arrays: ' + str(prop_import_collection.number_of_imports()))
# append to hdf5 file using arrays cached in grid property collection above
hdf5_file = model.h5_file_name()
log.debug('appending to hdf5 file: ' + hdf5_file)
grid.write_hdf5_from_caches(hdf5_file, mode = 'a', geometry = False, imported_properties = prop_import_collection, write_active = False)
# remove cached static property arrays from memory
if prop_import_collection is not None:
prop_import_collection.remove_all_cached_arrays()
# add imported properties parts to model, building property parts list
if prop_import_collection is not None and prop_import_collection.imported_list is not None:
prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid)
# mark model as modified
model.set_modified()
# store new version of model
log.info('storing model with additional properties in epc file: ' + epc_file)
model.store_epc(epc_file)
return model
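# Example usage (illustrative sketch only; the epc and .db file names are hypothetical): each tuple
# follows the (filename, keyword, property_kind, facet_type, facet, uom, time_index, null_value,
# discrete, realization) layout documented above.
#
#   ab_list = [('ntg.db', 'NTG', 'net to gross ratio', None, None, None, None, None, False, 0),
#              ('poro.db', 'POR', 'porosity', None, None, None, None, None, False, 0)]
#   model = add_ab_properties('case.epc', ab_property_list = ab_list)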
def add_surfaces(epc_file, # existing resqml model
crs_uuid = None, # optional crs uuid, defaults to crs associated with model (usually main grid crs)
ext_uuid = None, # if None, uuid for hdf5 file holding main grid geometry will be used
surface_file_format = 'zmap', # zmap, rms (roxar) or GOCAD-Tsurf only formats currently supported
rq_class = 'surface', # 'surface' or 'mesh': the class of object to be created
surface_role = 'map', # 'map' or 'pick'
quad_triangles = False, # if True, 4 triangles per quadrangle will be used for mesh formats, otherwise 2
surface_file_list = None, # list of full file names (paths), each holding one surface
make_horizon_interpretations_and_features = True): # if True, feature and interpretation objects are created
"""Process a list of surface files, adding each surface as a new part in the resqml model."""
assert surface_file_list, 'surface file list is empty or missing'
assert surface_file_format in ['zmap', 'rms', 'roxar', 'GOCAD-Tsurf'], 'unsupported surface file format: ' + str(surface_file_format)
if 'TriangulatedSet' in rq_class: rq_class = 'surface'
elif 'Grid2d' in rq_class: rq_class = 'mesh'
assert rq_class in ['surface', 'mesh']
log.info('accessing existing resqml model from: ' + epc_file)
model = rq.Model(epc_file = epc_file)
assert model, 'failed to read existing resqml model from file: ' + epc_file
if crs_uuid is None:
assert model.crs_root is not None, 'no crs uuid given and no default in model'
crs_uuid = rqet.uuid_for_part_root(model.crs_root)
assert crs_uuid is not None
crs_root = model.root_for_uuid(crs_uuid)
if ext_uuid is None: ext_uuid = model.h5_uuid()
if ext_uuid is None: # no pre-existing hdf5 part or references in model
hdf5_file = epc_file[:-4] + '.h5'
ext_node = model.create_hdf5_ext(file_name = hdf5_file)
ext_uuid = rqet.uuid_for_part_root(ext_node)
h5_mode = 'w'
else:
hdf5_file = model.h5_file_name(uuid = ext_uuid)
h5_mode = 'a'
assert ext_uuid is not None, 'failed to establish hdf5 uuid'
# append to hdf5 file using arrays from Surface object's patch(es)
log.info('will append to hdf5 file: ' + hdf5_file)
for surf_file in surface_file_list:
_, short_name = os.path.split(surf_file)
dot = short_name.rfind('.')
if dot > 0: short_name = short_name[:dot]
log.info('surface ' + short_name + ' processing file: ' + surf_file + ' using format: ' + surface_file_format)
if rq_class == 'surface':
if surface_file_format == 'GOCAD-Tsurf':
surface = rqs.Surface(model, extract_from_xml = False,
tsurf_file = surf_file,
surface_role = surface_role,
quad_triangles = quad_triangles)
else:
surface = rqs.Surface(model, extract_from_xml = False,
mesh_file = surf_file, mesh_format = surface_file_format,
surface_role = surface_role,
quad_triangles = quad_triangles)
elif rq_class == 'mesh':
if surface_file_format == 'GOCAD-Tsurf':
log.info(f"Cannot convert a GOCAD-Tsurf to mesh, only to TriangulatedSurface - skipping file {surf_file}")
break
else:
surface = rqs.Mesh(model,
mesh_file = surf_file, mesh_format = surface_file_format, mesh_flavour = 'reg&z',
surface_role = surface_role,
crs_uuid = crs_uuid)
else:
log.critical('this is impossible')
# NB. surface may be either a Surface object or a Mesh object
log.debug('appending to hdf5 file for surface file: ' + surf_file)
surface.write_hdf5(hdf5_file, mode = h5_mode)
if make_horizon_interpretations_and_features:
feature = rqo.GeneticBoundaryFeature(model, kind = 'horizon', extract_from_xml = False, feature_name = short_name)
feature.create_xml()
interp = rqo.HorizonInterpretation(model, extract_from_xml = False,
genetic_boundary_feature = feature, domain = 'depth')
interp_root = interp.create_xml()
surface.set_represented_interpretation_root(interp_root)
surface.create_xml(ext_uuid, add_as_part = True, add_relationships = True,
crs_root = crs_root, root = None,
title = short_name + ' sourced from ' + surf_file,
originator = None)
# mark model as modified
model.set_modified()
# store new version of model
log.info('storing model with additional parts in epc file: ' + epc_file)
model.store_epc(epc_file)
return model
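# Example usage (illustrative sketch only; the epc and surface file names are hypothetical):
#
#   model = add_surfaces('case.epc',
#                        surface_file_format = 'zmap',
#                        surface_file_list = ['top_reservoir.dat', 'base_reservoir.dat'])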
def grid_from_cp(model, cp_array, crs_uuid,
active_mask = None,
geometry_defined_everywhere = True, treat_as_nan = None,
dot_tolerance = 1.0, morse_tolerance = 5.0,
max_z_void = 0.1,
split_pillars = True, split_tolerance = 0.01, ijk_handedness = 'right',
known_to_be_straight = False):
"""Create a resqpy.grid.Grid object from a 7D corner point array.
notes:
this function sets up all the geometry arrays in memory but does not write to hdf5 nor create xml: use Grid methods;
geometry_defined_everywhere is deprecated, use treat_as_nan instead
"""
if treat_as_nan is None:
if not geometry_defined_everywhere: treat_as_nan = 'morse'
else:
assert treat_as_nan in ['none', 'dots', 'ij_dots', 'morse', 'inactive']
if treat_as_nan == 'none': treat_as_nan = None
geometry_defined_everywhere = (treat_as_nan is None)
assert cp_array.ndim == 7
nk, nj, ni = cp_array.shape[:3]
nk_plus_1 = nk + 1
nj_plus_1 = nj + 1
ni_plus_1 = ni + 1
if active_mask is None:
active_mask = np.ones((nk, nj, ni), dtype = 'bool')
inactive_mask = np.zeros((nk, nj, ni), dtype = 'bool')
else:
assert active_mask.shape == (nk, nj, ni)
inactive_mask = np.logical_not(active_mask)
all_active = np.all(active_mask)
if all_active and geometry_defined_everywhere:
cp_nan_mask = None
else:
cp_nan_mask = np.any(np.isnan(cp_array), axis = (3, 4, 5, 6)) # ie. if any nan per cell
if not geometry_defined_everywhere and not all_active:
if treat_as_nan == 'inactive':
log.debug('all inactive cell geometry being set to NaN')
cp_nan_mask = np.logical_or(cp_nan_mask, inactive_mask)
else:
if treat_as_nan == 'dots':
# for speed, only check primary diagonal of cells
log.debug('geometry for cells with no length to primary cell diagonal being set to NaN')
dot_mask = np.all(np.abs(cp_array[:, :, :, 1, 1, 1] - cp_array[:, :, :, 0, 0, 0]) < dot_tolerance, axis = -1)
elif treat_as_nan in ['ij_dots', 'morse']:
# check one diagonal of each I & J face
log.debug('geometry being set to NaN for inactive cells with no length to primary face diagonal for any I or J face')
dot_mask = np.zeros((nk, nj, ni), dtype = bool)
# k_face_vecs = cp_array[:, :, :, :, 1, 1] - cp_array[:, :, :, :, 0, 0]
j_face_vecs = cp_array[:, :, :, 1, :, 1] - cp_array[:, :, :, 0, :, 0]
i_face_vecs = cp_array[:, :, :, 1, 1, :] - cp_array[:, :, :, 0, 0, :]
dot_mask[:] = np.where(np.all(np.abs(j_face_vecs[:, :, :, 0]) < dot_tolerance, axis = -1), True, dot_mask)
                dot_mask[:] = np.where(np.all(np.abs(j_face_vecs[:, :, :, 1]) < dot_tolerance, axis = -1), True, dot_mask)
# FIT DATA TO A CURVE
# <NAME> - MIT Licence
# inspired by @dimgrr. Based on
# https://towardsdatascience.com/basic-curve-fitting-of-scientific-data-with-python-9592244a2509?gi=9c7c4ade0880
# https://github.com/venkatesannaveen/python-science-tutorial/blob/master/curve-fitting/curve-fitting-tutorial.ipynb
# https://www.reddit.com/r/CoronavirusUS/comments/fqx8fn/ive_been_working_on_this_extrapolation_for_the/
# to explore : https://github.com/fcpenha/Gompertz-Makehan-Fit/blob/master/script.py
# Import required packages
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.dates as mdates
import copy, math
from lmfit import Model
import pandas as pd
import streamlit as st
import datetime as dt
from datetime import datetime, timedelta
import matplotlib.animation as animation
import imageio
import streamlit.components.v1 as components
import os
import platform
import webbrowser
from pandas import read_csv, Timestamp, Timedelta, date_range
from io import StringIO
from numpy import log, exp, sqrt, clip, argmax, put
from scipy.special import erfc, erf
from matplotlib.pyplot import subplots
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import ConciseDateFormatter, AutoDateLocator
from matplotlib.backends.backend_agg import RendererAgg
_lock = RendererAgg.lock
from PIL import Image
import glob
# Functions to calculate values a,b and c ##########################
def exponential(x, a, b, c):
''' Standard gompertz function
a = height, b= halfway point, c = growth rate
https://en.wikipedia.org/wiki/Gompertz_function '''
return a * np.exp(-b * np.exp(-c * x))
def derivate(x, a, b, c):
    ''' First derivative of the Gompertz function, shifted by the global BASEVALUE offset.'''
return (np.exp(b * (-1 * np.exp(-c * x)) - c * x) * a * b * c ) + BASEVALUE
#return a * b * c * np.exp(-b*np.exp(-c*x))*np.exp(-c*x)
def derivate_of_derivate(x,a,b,c):
return a*b*c*(b*c*exp(-c*x) - c)*exp(-b*exp(-c*x) - c*x)
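# Analytic check of the two derivative helpers above: with f(x) = a * exp(-b * exp(-c * x)),
#   f'(x)  = a * b * c * exp(-b * exp(-c * x) - c * x)
#   f''(x) = a * b * c * (b * c * exp(-c * x) - c) * exp(-b * exp(-c * x) - c * x)
# so derivate() matches f'(x) apart from the added BASEVALUE offset, and derivate_of_derivate()
# matches f''(x) exactly.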
def gaussian(x, a, b, c):
    ''' Standard Gaussian function. Doesn't give useful results; not in use.'''
return a * np.exp(-np.power(x - b, 2) / (2 * np.power(c, 2)))
def gaussian_2(x, a, b, c):
    ''' Another Gaussian function, in use.
        a = height, b = centre, c = width '''
return a * np.exp(-((x - b) ** 2) / c)
def growth(x, a, b):
""" Growth model. a is the value at t=0. b is the so-called R number.
Doesnt work. FIX IT """
return np.power(a * 0.5, (x / (4 * (math.log(0.5) / math.log(b)))))
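# Algebra note for growth(): 0.5 ** (x / (4 * log(0.5) / log(b))) == exp(x * log(b) / 4) == b ** (x / 4),
# so the model is a * b**(x/4) and returns a at x = 0.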
# https://replit.com/@jsalsman/COVID19USlognormals
def lognormal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * erfc(- (log(x) - mu) / (s * sqrt(2)))
# https://en.wikipedia.org/wiki/Log-normal_distribution#Cumulative_distribution_function
def normal_c(x, s, mu, h): # x, sigma, mean, height
return h * 0.5 * (1 + erf((x - mu) / (s * sqrt(2))))
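# Both cumulative forms above follow from the standard normal cdf Phi(z) = 0.5 * (1 + erf(z / sqrt(2))):
# normal_c evaluates h * Phi((x - mu) / s), and lognormal_c evaluates h * Phi((log(x) - mu) / s)
# via the identity erfc(-z) = 1 + erf(z).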
# #####################################################################
def find_gaussian_curvefit(x_values, y_values):
try:
popt_g2, pcov_g2 = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[0, 0, 0],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
except RuntimeError as e:
str_e = str(e)
st.error(f"gaussian fit :\n{str_e}")
return tuple(popt_g2)
def use_curvefit(x_values, x_values_extra, y_values, title, daterange,i):
"""
Use the curve-fit from scipy.
    IN : x- and y-values. The *_extra values are used to extrapolate ("predict") the curve beyond the data
"""
with _lock:
st.subheader(f"Curvefit (scipy) - {title}")
fig1x = plt.figure()
try:
a_start, b_start, c_start = 0,0,0
popt, pcov = curve_fit(
f=exponential,
xdata=x_values,
ydata=y_values,
#p0=[4600, 11, 0.5],
            p0 = [a_start, b_start, c_start ], # IC BEDS MARCH-APRIL
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
exponential(x_values_extra, *popt),
"r-",
label="exponential fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Exponential fit :\n{str_e}")
try:
popt_d, pcov_d = curve_fit(
f=derivate,
xdata=x_values,
ydata=y_values,
#p0=[0, 0, 0],
            p0 = [a_start, b_start, c_start ], # IC BEDS MARCH-APRIL
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
derivate(x_values_extra, *popt_d),
"g-",
label="derivate fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_d),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Derivate fit :\n{str_e}")
# FIXIT
# try:
# popt_growth, pcov_growth = curve_fit(
# f=growth,
# xdata=x_values,
# ydata=y_values,
# p0=[500, 0.0001],
# bounds=(-np.inf, np.inf),
# maxfev=10000,
# )
# plt.plot(
# x_values_extra,
# growth(x_values_extra, *popt_growth),
# "y-",
# label="growth: a=%5.3f, b=%5.3f" % tuple(popt_growth),
# )
# except:
# st.write("Error with growth model fit")
try:
popt_g, pcov_g = curve_fit(
f=gaussian_2,
xdata=x_values,
ydata=y_values,
p0=[a_start, b_start, c_start ],
bounds=(-np.inf, np.inf),
maxfev=10000,
)
plt.plot(
x_values_extra,
gaussian_2(x_values_extra, *popt_g),
"b-",
label="gaussian fit: a=%5.3f, b=%5.3f, c=%5.3f" % tuple(popt_g),
)
except RuntimeError as e:
str_e = str(e)
st.error(f"Gaussian fit :\n{str_e}")
plt.scatter(x_values, y_values, s=20, color="#00b3b3", label="Data")
plt.legend()
plt.title(f"{title} / curve_fit (scipy)")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
        # ATTEMPT TO GET DATES ON THE X-AXIS (TOFIX)
# plt.xlim(daterange[0], daterange[-1])
# lay-out of the x axis
# plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
# interval_ = 5
# plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=interval_))
# plt.gcf().autofmt_xdate()
#plt.show()
filename= (f"{OUTPUT_DIR}scipi_{title}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1x)
# def make_gif(filelist):
# # Create the frames
# frames = []
# imgs = glob.glob("*.png")
# for i in imgs:
# new_frame = Image.open(i)
# frames.append(new_frame)
#
# # Save into a GIF file that loops forever
# frames[0].save('png_to_gif.gif', format='GIF',
# append_images=frames[1:],
# save_all=True,
# duration=300, loop=0)
def use_lmfit(x_values, y_values, functionlist, title,i, max_y_values):
"""
Use lmfit.
IN : x- and y-values.
functionlist (which functions to use)
adapted from https://stackoverflow.com/a/49843706/4173718
TODO: Make all graphs in one graph
"""
a_start, b_start, c_start = 0,0,0
for function in functionlist:
#placeholder0.subheader(f"LMFIT - {title} - {function}")
# create a Model from the model function
if function == "exponential":
bmodel = Model(exponential)
formula = "a * np.exp(-b * np.exp(-c * x))"
elif function == "derivate":
bmodel = Model(derivate)
formula = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "gaussian":
bmodel = Model(gaussian_2)
formula = "a * np.exp(-((x - b) ** 2) / c)"
else:
st.write("Please choose a function")
st.stop()
# create Parameters, giving initial values
#params = bmodel.make_params(a=4711, b=12, c=0.06)
        params = bmodel.make_params(a=a_start, b=b_start, c=c_start) # IC BEDS MARCH-APRIL
# params = bmodel.make_params()
params["a"].min = a_start
params["b"].min = b_start
params["c"].min = c_start
# do fit, st.write result
result = bmodel.fit(y_values, params, x=x_values)
a = round(result.params['a'].value,5)
b= round(result.params['b'].value,5)
c =round(result.params['c'].value,5)
placeholder1.text(result.fit_report())
with _lock:
#fig1y = plt.figure()
fig1y, ax1 = plt.subplots()
ax2 = ax1.twinx()
# plot results -- note that `best_fit` is already available
ax1.scatter(x_values, y_values, color="#00b3b3", s=2)
#ax1.plot(x_values, result.best_fit, "g")
res = (f"a: {a} / b: {b} / c: {c}")
plt.title(f"{title} / lmfit - {function}\n{formula}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
ax1.plot(t, bmodel.eval(result.params, x=t), "r-")
ax2.plot (t, derivate_of_derivate(t,a,b,c), color = 'purple')
ax2.axhline(linewidth=1, color='purple', alpha=0.5, linestyle="--")
#ax1.plot (t, derivate(t,26660.1, 9.01298, 0.032198), color = 'purple')
#ax2.plot (t, derivate_of_derivate(t,26660.1, 9.01298, 0.032198), color = 'yellow')
#plt.ylim(bottom=0)
#ax1.ylim(0, max_y_values*1.1)
#ax1.set_ylim(510,1200)
#ax2.set_ylim(0,12)
ax1.set_xlabel(f"Days from {from_}")
ax1.set_ylabel(f"{title} - red")
ax2.set_ylabel("delta - purple")
#plt.show()
filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
plt.savefig(filename, dpi=100, bbox_inches="tight")
placeholder.pyplot(fig1y)
if prepare_for_animation == False:
with _lock:
fig1z = plt.figure()
# plot results -- note that `best_fit` is already available
if function == "exponential":
plt.plot(t, derivate(t,a,b,c))
function_x = "derivate"
formula_x = "a * b * c * np.exp(b * (-1 * np.exp(-c * x)) - c * x)"
elif function == "derivate":
plt.plot(t, exponential(t, a,b,c))
function_x = "exponential"
formula_x = "a * np.exp(-b * np.exp(-c * x))"
else:
st.error("ERROR")
st.stop()
plt.title(f"{title} / {function_x}\n{formula_x}\n{res}")
t = np.linspace(0.0, TOTAL_DAYS_IN_GRAPH, 10000)
# use `result.eval()` to evaluate model given params and x
#plt.plot(t, bmodel.eval(result.params, x=t), "r-")
plt.ylim(bottom=0)
plt.xlabel(f"Days from {from_}")
plt.ylabel(title)
#plt.show()
#filename= (f"{OUTPUT_DIR}lmfit_{title}_{function}_{i}")
#plt.savefig(filename, dpi=100, bbox_inches="tight")
st.pyplot(fig1z)
return filename
def fit_the_values_really(x_values, y_values, which_method, title, daterange,i, max_y_values):
x_values_extra = np.linspace(
start=0, stop=TOTAL_DAYS_IN_GRAPH - 1, num=TOTAL_DAYS_IN_GRAPH
)
x_values = x_values[:i]
y_values = y_values[:i]
if prepare_for_animation == False:
use_curvefit(x_values, x_values_extra, y_values, title, daterange,i)
return use_lmfit(x_values,y_values, [which_method], title,i, max_y_values)
def fit_the_values(to_do_list , total_days, daterange, which_method, prepare_for_animation):
"""
We are going to fit the values
"""
# Here we go !
st.header("Fitting data to formulas")
infox = (
'<br>Exponential / Standard gompertz function : <i>a * exp(-b * np.exp(-c * x))</i></li>'
'<br>First derivate of the Gompertz function : <i>a * b * c * exp(b * (-1 * exp(-c * x)) - c * x)</i></li>'
'<br>Gaussian : <i>a * exp(-((x - b) ** 2) / c)</i></li>'
'<br>Working on growth model: <i>(a * 0.5 ^ (x / (4 * (math.log(0.5) / math.log(b)))))</i> (b will be the Rt-number)</li>'
)
st.markdown(infox, unsafe_allow_html=True)
global placeholder0, placeholder, placeholder1
placeholder0 = st.empty()
placeholder = st.empty()
placeholder1 = st.empty()
el = st.empty()
for v in to_do_list:
title = v[0]
y_values = v[1]
max_y_values = max(y_values)
        # some preparations
number_of_y_values = len(y_values)
global TOTAL_DAYS_IN_GRAPH
TOTAL_DAYS_IN_GRAPH = total_days # number of total days
x_values = np.linspace(start=0, stop=number_of_y_values - 1, num=number_of_y_values)
if prepare_for_animation == True:
filenames = []
for i in range(5, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
filenames.append(filename)
# build gif
with imageio.get_writer('mygif.gif', mode='I') as writer:
for filename_ in filenames:
image = imageio.imread(f"{filename_}.png")
writer.append_data(image)
webbrowser.open('mygif.gif')
# Remove files
for filename__ in set(filenames):
os.remove(f"{filename__}.png")
else:
for i in range(len(x_values)-1, len(x_values)):
filename = fit_the_values_really(x_values, y_values, which_method, title, daterange, i, max_y_values)
# FIXIT
# aq, bq, cq = find_gaussian_curvefit(x_values, y_values)
# st.write(f"Find Gaussian curvefit - a:{aq} b:{bq} c: {cq}")
def select_period(df, show_from, show_until):
""" _ _ _ """
if show_from is None:
show_from = "2020-2-27"
if show_until is None:
show_until = "2020-4-1"
mask = (df[DATEFIELD].dt.date >= show_from) & (df[DATEFIELD].dt.date <= show_until)
df = df.loc[mask]
df = df.reset_index()
return df
def normal_c(df):
#https://replit.com/@jsalsman/COVID19USlognormals
st.subheader("Normal_c")
df = df.set_index(DATEFIELD)
firstday = df.index[0] + Timedelta('1d')
nextday = df.index[-1] + Timedelta('1d')
lastday = df.index[-1] + Timedelta(TOTAL_DAYS_IN_GRAPH - len(df), 'd') # extrapolate
with _lock:
#fig1y = plt.figure()
fig1yz, ax = subplots()
ax.set_title('NL COVID-19 cumulative log-lognormal extrapolations\n'
+ 'Source: repl.it/@jsalsman/COVID19USlognormals')
x = ((df.index - Timestamp('2020-01-01')) # independent
// Timedelta('1d')).values # small day-of-year integers
yi = df['Total_reported_cumm'].values # dependent
yd = df['Deceased_cumm'].values # dependent
exrange = range((Timestamp(nextday)
- Timestamp(firstday)) // Timedelta('1d'),
(Timestamp(lastday) + Timedelta('1d')
- Timestamp(firstday)) // Timedelta('1d')) # day-of-year ints
indates = date_range(df.index[0], df.index[-1])
exdates = date_range(nextday, lastday)
ax.scatter(indates, yi, color="#00b3b3", label='Infected')
ax.scatter(indates, yd, color="#00b3b3", label='Dead')
sqrt2 = sqrt(2)
im = Model(normal_c)
st.write (x)
iparams = im.make_params(s=0.3, mu=4.3, h=16.5)
st.write (iparams)
#iparams['s'].min = 0; iparams['h'].min = 0
iresult = im.fit(log(yi+1), iparams, x=x)
st.text('---- Infections:\n' + iresult.fit_report())
ax.plot(indates, exp(iresult.best_fit)-1, 'b', label='Infections fit')
ipred = iresult.eval(x=exrange)
ax.plot(exdates, exp(ipred)-1, 'b--',
label='Forecast: {:,.0f}'.format(exp(ipred[-1])-1))
iupred = iresult.eval_uncertainty(x=exrange, sigma=0.95) # 95% interval
iintlow = clip(ipred-iupred, ipred[0], None)
        put(iintlow, range(argmax(iintlow), len(iintlow)), iintlow[argmax(iintlow)])
#!/usr/bin/env python3
from src import GradientMap
from PIL import Image
import torch.cuda
from torchvision import transforms
import numpy as np
import sys
# Settings
num_iters = 1000
# Enable CUDA
has_cuda = False
dev = None
if torch.cuda.is_available():
dev = torch.device('cuda')
if dev.type == 'cuda':
has_cuda = True
if not has_cuda:
print("No cuda device found. CUDA test is disabled.")
# Load Images
print("Opening images ... ", end=''); sys.stdout.flush()
# Foreground and background are switched. Combined with a negative offset, the pasted gradients overlap on all sides.
# This tests all special cases I could think of (e.g. unrolling and border handling).
fg = Image.open('img/bg.png')
bg = Image.open('img/fg.png')
print("done")
# Compute CPU result
print("Computing CPU results ... ", end=''); sys.stdout.flush()
bg_grad = GradientMap.from_image(bg)
fg_grad = GradientMap.from_image(fg)
bg_grad.paste_gradient(fg_grad, -30, -30)
bg_grad.reconstruct(num_iters)
result_cpu = bg_grad.get_image()
print("done")
# Compute CUDA result
if has_cuda:
print("Computing CUDA results ... ", end=''); sys.stdout.flush()
bg_grad = GradientMap.from_image(bg, device=dev)
fg_grad = GradientMap.from_tensor(transforms.ToTensor()(np.asarray(fg)).to(device=dev))
bg_grad.paste_gradient(fg_grad, -30, -30)
bg_grad.reconstruct(num_iters)
result_cuda = bg_grad.get_image()
print("done")
# Load Reference result
result_reference = Image.open('img/result_1000.png')
# Init exit state
exit_state = 0
print()
# Compare CPU and Reference
if not np.array_equal(np.asarray(result_reference), np.asarray(result_cpu)):
    exit_state = 1
import math
import unittest
import logging
import re
import numpy as np
from imageio import imread
from scipy.ndimage.interpolation import rotate
from autocnet.examples import get_path
from autocnet.transformation import roi
from .. import ciratefi
import pytest
# Can be parameterized for more exhaustive tests
upsampling = 10
alpha = math.pi/2
cifi_thresh = 90
rafi_thresh = 90
tefi_thresh = 100
use_percentile = True
radii = list(range(1, 3))
@pytest.fixture
def img():
return imread(get_path('AS15-M-0298_SML.png'), as_gray=True)
@pytest.fixture
def img_coord():
return 482.09783936, 652.40679932
@pytest.fixture
def template(img, img_coord):
coord_x, coord_y = img_coord
template= roi.Roi(img, coord_x, coord_y, 5, 5).clip()
template = rotate(template, 90)
return template
@pytest.fixture
def search():
coord_x, coord_y = (482.09783936, 652.40679932)
img = imread(get_path('AS15-M-0298_SML.png'), as_gray=True)
search = roi.Roi(img, coord_x, coord_y, 21, 21).clip()
return search
@pytest.fixture
def offset_template(img, img_coord):
coord_x, coord_y = img_coord
coord_x += 1
coord_y += 1
offset_template = roi.Roi(img, coord_x, coord_y, 5, 5).clip()
return offset_template
def test_cifi_radii_too_large(template, search, caplog):
# check all logs
ciratefi.cifi(template, search, 1.0, radii=[100], use_percentile=False)
    num_pattern = r'\d+'
captured_log = caplog.records[0].getMessage()
match = re.findall(num_pattern, captured_log)
assert (f'Max Radii is larger than original template, '\
f'this may produce sub-par results.Max radii: {match[0]} max template dimension: {match[1]}'\
== caplog.records[0].getMessage())
def test_cifi_bounds_error(template, search):
with pytest.raises(ValueError), pytest.warns(UserWarning):
ciratefi.cifi(template, search, -1.1, use_percentile=False)
def test_cifi_radii_none_error(template, search):
with pytest.raises(ValueError):
ciratefi.cifi(template, search, 90, radii=None)
def test_cifi_scales_none_error(template, search):
with pytest.raises(ValueError):
ciratefi.cifi(template, search, 90, scales=None)
def test_cifi_template_too_large_error(template, search):
with pytest.raises(ValueError):
ciratefi.cifi(search,template, 90, scales=None)
@pytest.mark.parametrize('cifi_thresh, radii', [(90,list(range(1, 3)))])
@pytest.mark.filterwarnings('ignore::UserWarning') # skimage deprecation warnings to move to new defaults
def test_cifi(template, search, cifi_thresh, radii):
pixels, scales = ciratefi.cifi(template, search, thresh=cifi_thresh,
radii=radii, use_percentile=True)
assert search.shape == scales.shape
assert (np.floor(search.shape[0]/2), np.floor(search.shape[1]/2)) in pixels
assert pixels.size in range(0,search.size)
@pytest.mark.filterwarnings('ignore::UserWarning') # skimage deprecation warnings to move to new defaults
def test_rafi_warning(template, search,caplog):
rafi_pixels = [(10, 10)]
rafi_scales = np.ones(search.shape, dtype=float)
ciratefi.rafi(template, search, rafi_pixels,
rafi_scales, thresh=1, radii=[100],
use_percentile=False)
def test_rafi_bounds_error(template, search):
rafi_pixels = [(10, 10)]
    rafi_scales = np.ones(search.shape, dtype=float)
from problem2 import *
import numpy as np
import sys
from sklearn.datasets import make_classification
'''
Unit test 2:
This file includes unit tests for problem2.py.
You could test the correctness of your code by typing `nosetests -v test2.py` in the terminal.
'''
#-------------------------------------------------------------------------
def test_python_version():
''' ----------- Problem 2 (50 points in total)--------------'''
assert sys.version_info[0]==3 # require python 3
#-------------------------------------------------------------------------
def test_compute_z1():
'''(2 point) compute_z1'''
x = np.mat('1.; 2.; 3.')
W1 = np.mat([[0.5,-0.6,0.3],
[0.6,-0.5,0.2]])
b1 = np.mat('0.2; 0.3')
z1 = compute_z1(x,W1,b1)
assert type(z1) == np.matrixlib.defmatrix.matrix
assert z1.shape == (2,1)
assert np.allclose(z1, np.mat([0.4,0.5]).T, atol = 1e-3)
x = np.mat([2., 5.,2.]).T
z1 = compute_z1(x,W1,b1)
assert np.allclose(z1.T, [-1.2,-0.6], atol = 1e-3)
#-------------------------------------------------------------------------
def test_compute_a1():
'''(3 point) compute_a1'''
z1 = np.mat([0.,1.]).T
a1 = compute_a1(z1)
assert type(a1) == np.matrixlib.defmatrix.matrix
assert a1.shape == (2,1)
assert np.allclose(a1.T, [0.5,0.731], atol = 1e-3)
z1 = np.mat([-1.,-100., 100]).T
a1 = compute_a1(z1)
assert a1.shape == (3,1)
assert np.allclose(a1.T, [0.2689, 0, 1], atol = 1e-2)
np.seterr(all='raise')
z1 = np.mat([1000., 1000.]).T
a1 = compute_a1(z1)
assert np.allclose(a1.T, [1., 1.], atol = 1e-2)
assert np.allclose(z1.T, [1000, 1000])
z1 = np.mat([-1000., -1000.]).T
a1 = compute_a1(z1)
assert np.allclose(a1.T, [0., 0.], atol = 1e-2)
assert np.allclose(z1.T, [-1000, -1000])
a1 = compute_a1(np.mat([1000., 100.]).T)
assert np.allclose(a1.T, [1., 1.], atol = 1e-2)
a = compute_a1(np.mat([-1000., -10.]).T)
assert np.allclose(a.T, [0., 0.], atol = 1e-2)
#-------------------------------------------------------------------------
def test_compute_z2():
'''(2 point) compute_z2'''
x = np.mat([1., 2., 3.]).T
W2 = np.mat([[0.5,-0.6,0.3],
[0.6,-0.5,0.2]])
b2 = np.mat([0.2, 0.3]).T
z2 = compute_z2(x,W2,b2)
assert type(z2) == np.matrixlib.defmatrix.matrix
assert z2.shape == (2,1)
assert np.allclose(z2.T, [0.4,0.5], atol = 1e-3)
x = np.mat([2., 5.,2.]).T
z2 = compute_z2(x,W2,b2)
assert np.allclose(z2.T, [-1.2,-0.6], atol = 1e-3)
#-------------------------------------------------------------------------
def test_compute_a2():
'''(3 point) compute_a2'''
z = np.mat([1., 1.]).T
a = compute_a2(z)
assert type(a) == np.matrixlib.defmatrix.matrix
assert np.allclose(a.T, [0.5, 0.5], atol = 1e-2)
assert np.allclose(z.T, [1., 1.])
a = compute_a2(np.mat([1., 1.,1., 1.]).T)
assert np.allclose(a.T, [0.25, 0.25, 0.25, 0.25], atol = 1e-2)
a = compute_a2(np.mat([-1., -1.,-1., -1.]).T)
assert np.allclose(a.T, [0.25, 0.25, 0.25, 0.25], atol = 1e-2)
a = compute_a2(np.mat([-2., -1.,1., 2.]).T)
assert np.allclose(a.T, [ 0.01275478,0.03467109,0.25618664,0.69638749], atol = 1e-2)
a = compute_a2(np.mat([100., 100.]).T)
assert np.allclose(a.T, [0.5, 0.5], atol = 1e-2)
a = compute_a2(np.mat([-100., -100.]).T)
assert np.allclose(a.T, [0.5, 0.5], atol = 1e-2)
np.seterr(all='raise')
z = np.mat([1000., 1000.]).T
a = compute_a2(z)
assert np.allclose(a.T, [0.5, 0.5], atol = 1e-2)
assert np.allclose(z.T, [1000, 1000])
z = np.mat([-1000., -1000.]).T
a = compute_a2(z)
assert np.allclose(a.T, [0.5, 0.5], atol = 1e-2)
assert np.allclose(z.T, [-1000, -1000])
a = compute_a2(np.mat([1000., 10.]).T)
assert np.allclose(a.T, [1., 0.], atol = 1e-2)
a = compute_a2(np.mat([-1000., -10.]).T)
assert np.allclose(a.T, [0., 1.], atol = 1e-2)
#-------------------------------------------------------------------------
def test_forward():
'''(2 point) forward'''
x = np.mat([1., 2.,3.,4]).T
# first layer with 3 neurons
W1 = np.mat([[0.,0.,0.,0.],
[0.,0.,0.,0.],
[0.,0.,0.,0.]])
b1 = np.mat([0.,0.,0.]).T
# second layer with 2 neurons
W2 = np.mat([[0.,0.,0.],
[0.,0.,0.]])
b2 = np.mat([100.,0.]).T
z1, a1, z2, a2 = forward(x,W1,b1,W2,b2)
assert type(z1) == np.matrixlib.defmatrix.matrix
assert type(a1) == np.matrixlib.defmatrix.matrix
assert z1.shape == (3,1)
assert a1.shape == (3,1)
assert type(z2) == np.matrixlib.defmatrix.matrix
assert type(a2) == np.matrixlib.defmatrix.matrix
assert z2.shape == (2,1)
assert a2.shape == (2,1)
assert np.allclose(z1.T, [0,0,0], atol = 1e-3)
assert np.allclose(a1.T, [0.5,0.5,0.5], atol = 1e-3)
assert np.allclose(z2.T, [100,0], atol = 1e-3)
assert np.allclose(a2.T, [1,0], atol = 1e-3)
#-------------------------------------------------------------------------
def test_compute_dL_da2():
'''(2 point) compute_dL_da2'''
a = np.mat([0.5,0.5]).T
y = 1
dL_da = compute_dL_da2(a,y)
assert type(dL_da) == np.matrixlib.defmatrix.matrix
assert dL_da.shape == (2,1)
assert np.allclose(dL_da.T, [0.,-2.], atol= 1e-3)
a = np.mat([0.5,0.5]).T
y = 0
dL_da = compute_dL_da2(a,y)
assert np.allclose(dL_da.T, [-2.,0.], atol= 1e-3)
a = np.mat([0.1,0.6,0.1,0.2]).T
y = 3
dL_da = compute_dL_da2(a,y)
assert np.allclose(dL_da.T, [0.,0.,0.,-5.], atol= 1e-3)
a = np.mat([1.,0.]).T
y = 1
dL_da = compute_dL_da2(a,y)
np.seterr(all='raise')
assert np.allclose(dL_da[0], 0., atol= 1e-3)
assert dL_da[1] < -1e5
assert dL_da[1] > -float('Inf')
assert np.allclose(a.T, [1.,0.])
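    # Note for clarity: the expected gradients follow the cross-entropy loss
    # L = -log(a[y]), so dL/da[y] = -1/a[y] and every other entry is zero; the last
    # case also checks that a zero probability is clipped rather than returning -inf.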
#-------------------------------------------------------------------------
def test_compute_da2_dz2():
'''(2 point) compute_da2_dz2'''
a = np.mat([0.3, 0.7]).T
da_dz = compute_da2_dz2(a)
assert type(da_dz) == np.matrixlib.defmatrix.matrix
assert da_dz.shape == (2,2)
assert np.allclose(da_dz, [[.21,-.21],[-.21,.21]], atol= 1e-3)
a = np.mat([0.1, 0.2, 0.7]).T
da_dz = compute_da2_dz2(a)
assert da_dz.shape == (3,3)
da_dz_true = np.mat( [[ 0.09, -0.02, -0.07],
[-0.02, 0.16, -0.14],
[-0.07, -0.14, 0.21]])
assert np.allclose(da_dz,da_dz_true,atol= 1e-3)
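    # Note for clarity: these expected values are the softmax Jacobian
    # da/dz = diag(a) - a*a^T, e.g. for a=[0.3, 0.7] the diagonal entries are
    # 0.3*(1-0.3)=0.21 and the off-diagonal entries are -0.3*0.7=-0.21.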
#-------------------------------------------------------------------------
def test_compute_dz2_dW2():
'''(2 point) compute_dz2_dW2'''
x = np.mat([1., 2.,3.]).T
dz_dW = compute_dz2_dW2(x,2)
assert type(dz_dW) == np.matrixlib.defmatrix.matrix
assert dz_dW.shape == (2,3)
dz_dW_true = np.mat([[1., 2.,3],[1., 2.,3]])
assert np.allclose(dz_dW, dz_dW_true, atol=1e-2)
#-------------------------------------------------------------------------
def test_compute_dz2_db2():
'''(2 point) compute_dz2_db2'''
dz_db = compute_dz2_db2(2)
assert type(dz_db) == np.matrixlib.defmatrix.matrix
assert dz_db.shape == (2,1)
dz_db_true = np.mat([1.,1.])
assert np.allclose(dz_db, dz_db_true, atol=1e-2)
#-------------------------------------------------------------------------
def test_compute_dz2_da1():
'''(2 point) compute_dz2_da1'''
    W2 = np.mat([[1., .4, 3.],
                 [8., .5, .2]]) + .32
dz2_da1 = compute_dz2_da1(W2)
assert type(dz2_da1) == np.matrixlib.defmatrix.matrix
assert dz2_da1.shape == (2,3)
print (dz2_da1)
assert np.allclose(dz2_da1, [[ 1.32, 0.72, 3.32], [ 8.32, 0.82, 0.52]], atol= 1e-3)
#-------------------------------------------------------------------------
def test_compute_da1_dz1():
'''(2 point) compute_da1_dz1'''
a1= np.mat([.5,.5,.3,.6]).T
da1_dz1 = compute_da1_dz1(a1)
assert type(da1_dz1) == np.matrixlib.defmatrix.matrix
assert da1_dz1.shape == (4,1)
assert np.allclose(da1_dz1.T, [.25,.25,.21,.24], atol= 1e-3)
#-------------------------------------------------------------------------
def test_compute_dz1_dW1():
'''(2 point) compute_dz1_dW1'''
x = np.mat([1., 2.,3.]).T
dz_dW = compute_dz1_dW1(x,2)
assert type(dz_dW) == np.matrixlib.defmatrix.matrix
assert dz_dW.shape == (2,3)
dz_dW_true = np.mat([[1., 2.,3],[1., 2.,3]])
assert np.allclose(dz_dW, dz_dW_true, atol=1e-2)
#-------------------------------------------------------------------------
def test_compute_dz1_db1():
'''(2 point) compute_dz1_db1'''
dz_db = compute_dz1_db1(2)
assert type(dz_db) == np.matrixlib.defmatrix.matrix
assert dz_db.shape == (2,1)
dz_db_true = np.mat([1.,1.])
assert np.allclose(dz_db, dz_db_true, atol=1e-2)
#-------------------------------------------------------------------------
def test_backward():
'''(4 point) backward'''
x = np.mat([1., 2.,3.,4]).T
y = 1
# first layer with 3 hidden neurons
W1 = np.mat([[0.,0.,0.,0.],
[0.,0.,0.,0.],
[0.,0.,0.,0.]])
b1 = np.mat([0.,0.,0.]).T
# second layer with 2 hidden neurons
W2 = np.mat([[0.,0.,0.],
[0.,0.,0.]])
b2 = np.mat([0.,0.]).T
z1, a1, z2, a2 = forward(x, W1, b1, W2, b2)
dL_da2, da2_dz2, dz2_dW2, dz2_db2, dz2_da1, da1_dz1, dz1_dW1, dz1_db1= backward(x,y,a1,a2, W2)
assert type(dL_da2) == np.matrixlib.defmatrix.matrix
assert dL_da2.shape == (2,1)
    assert np.allclose(dL_da2.T,[0.,-2.],atol=1e-3)
assert type(da2_dz2) == np.matrixlib.defmatrix.matrix
assert da2_dz2.shape == (2,2)
    assert np.allclose(da2_dz2,[[.25,-.25],[-.25,.25]],atol=1e-3)
assert type(dz2_dW2) == np.matrixlib.defmatrix.matrix
assert dz2_dW2.shape == (2,3)
    assert np.allclose(dz2_dW2,[[.5,.5,.5],[.5,.5,.5]],atol=1e-3)
assert type(dz2_db2) == np.matrixlib.defmatrix.matrix
assert dz2_db2.shape == (2,1)
    assert np.allclose(dz2_db2.T,[1,1],atol=1e-3)
assert type(dz2_da1) == np.matrixlib.defmatrix.matrix
assert dz2_da1.shape == (2,3)
t = [[ 0., 0., 0.],
[ 0., 0., 0.]]
    assert np.allclose(dz2_da1,t,atol=1e-3)
assert type(da1_dz1) == np.matrixlib.defmatrix.matrix
assert da1_dz1.shape == (3,1)
    assert np.allclose(da1_dz1.T,[.25,.25,.25],atol=1e-3)
assert type(dz1_dW1) == np.matrixlib.defmatrix.matrix
assert dz1_dW1.shape == (3,4)
t = [[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.]]
    assert np.allclose(dz1_dW1,t,atol=1e-3)
assert type(dz1_db1) == np.matrixlib.defmatrix.matrix
assert dz1_db1.shape == (3,1)
    assert np.allclose(dz1_db1.T,[1,1,1],atol=1e-3)
#-------------------------------------------------------------------------
def test_compute_dL_da1():
'''(3 point) compute_dL_da1'''
dL_dz2 = np.mat([ 0.09554921, 0.14753129, 0.47769828,-0.72077878]).T
dz2_da1 = np.mat([[ 0.26739761, 0.73446399, 0.24513834],
[ 0.80682023, 0.7841972 , 0.01415917],
[ 0.70592854, 0.73489433, 0.91355454],
[ 0.8558265 , 0.84993468, 0.24702029]])
dL_da1 = compute_dL_da1(dL_dz2,dz2_da1)
assert type(dL_da1) == np.matrixlib.defmatrix.matrix
assert dL_da1.shape == (3,1)
dL_da1_true = np.mat([-0.13505987,-0.07568605, 0.28386814]).T
assert np.allclose(dL_da1, dL_da1_true, atol=1e-3)
#-------------------------------------------------------------------------
def test_compute_dL_dz1():
'''(3 point) compute_dL_dz1'''
dL_da1 = np.mat([-0.03777044, 0.29040313,-0.42821076,-0.28597724 ]).T
da1_dz1 = np.mat([ 0.03766515, 0.09406613, 0.06316817, 0.05718137]).T
dL_dz1 = compute_dL_dz1(dL_da1, da1_dz1)
print (dL_dz1)
assert type(dL_dz1) == np.matrixlib.defmatrix.matrix
assert dL_dz1.shape == (4,1)
dL_dz1_true = np.mat([-0.00142263, 0.0273171, -0.02704929,-0.01635257]).T
assert np.allclose(dL_dz1, dL_dz1_true, atol=1e-3)
##-------------------------------------------------------------------------
def test_compute_gradients():
'''(4 point) compute_gradients'''
x = np.mat([1., 2.,3.,4]).T
y = 1
# first layer with 3 hidden neurons
W1 = np.mat([[0.,0.,0.,0.],
[0.,0.,0.,0.],
[0.,0.,0.,0.]])
b1 = np.mat([0.,0.,0.]).T
# second layer with 2 hidden neurons
W2 = np.mat([[0.,0.,0.],
[0.,0.,0.]])
b2 = np.mat([0.,0.]).T
# forward pass
z1, a1, z2, a2 = forward(x, W1, b1, W2, b2)
print ('a1:', a1)
# backward pass: prepare local gradients
dL_da2, da2_dz2, dz2_dW2, dz2_db2, dz2_da1, da1_dz1, dz1_dW1, dz1_db1= backward(x,y,a1,a2, W2)
# call the function
dL_dW2, dL_db2, dL_dW1, dL_db1 = compute_gradients(dL_da2, da2_dz2, dz2_dW2, dz2_db2, dz2_da1, da1_dz1, dz1_dW1, dz1_db1)
assert type(dL_dW2) == np.matrixlib.defmatrix.matrix
assert dL_dW2.shape == (2,3)
t = [[ 0.25, 0.25, 0.25],
[-0.25,-0.25,-0.25]]
    assert np.allclose(dL_dW2,t,atol=1e-3)
assert type(dL_db2) == np.matrixlib.defmatrix.matrix
assert dL_db2.shape == (2,1)
t = [0.5,-0.5]
    assert np.allclose(dL_db2.T,t,atol=1e-3)
assert type(dL_dW1) == np.matrixlib.defmatrix.matrix
assert dL_dW1.shape == (3,4)
t = np.zeros((3,4))
    assert np.allclose(dL_dW1,t,atol=1e-3)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCMC-estimation of status transition rates from IUCN record
Created on Mon Oct 28 14:43:44 2019
@author: <NAME> (<EMAIL>)
"""
import numpy as np
np.set_printoptions(suppress=True)
import pandas as pd
import os,sys
import datetime
from scipy.optimize import curve_fit
import warnings
import iucn_sim.functions as cust_func
# get extinction probs_________________________________________________________
def p_e_year(years,p_e):
pe_year = 1-(1-float(p_e))**(1/years)
return pe_year
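# Example (illustrative numbers): p_e_year converts a total extinction probability
# over a time span into the equivalent yearly probability, e.g. a 50% risk over
# 10 years corresponds to p_e_year(10, 0.5) = 1 - (1 - 0.5)**(1/10), roughly 0.067.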
def update_multiplier(q,d=1.1):
u = np.random.uniform(0,1)
l = 2*np.log(d)
m = np.exp(l*(u-.5))
new_q = q * m
return new_q, np.log(m)
def sample_rate_mcmc(count, tot_time, n_samples = 1, n_gen = 100000,burnin = 1000):
def get_loglik(count, dT, rate):
return np.log(rate)*count - dT*rate
post_samples = []
q = 0.01
likA = get_loglik(count,tot_time,q)
for i in range(n_gen):
new_q, hast = update_multiplier(q)
lik = get_loglik(count,tot_time,new_q)
if lik-likA + hast >= np.log(np.random.random()):
q = new_q
likA = lik
if i > burnin and i % 10==0:
post_samples.append(q)
sampled_rates = np.random.choice(post_samples,n_samples,replace=False)
return sampled_rates
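# Usage sketch (numbers are illustrative only): sample posterior rates for a transition
# that was observed 3 times over 150 cumulative years spent in the source category;
# the sampled rates later populate one cell of the q-matrix per replicate.
# rates = sample_rate_mcmc(3, 150.0, n_samples=10, n_gen=20000, burnin=1000)
# rates.mean()  # should lie near the maximum-likelihood rate 3/150 = 0.02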
def power_function(x,a,b):
# defining the power function
y = float(a)*x**float(b)
return y
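# Usage sketch of how power_function is applied further below (input risks here are
# made-up values): curve_fit is given only two points, the EN (x=4) and CR (x=5)
# yearly extinction risks, and the fitted curve is evaluated at x=1,2,3 to
# extrapolate the LC, NT and VU risks.
# (a, b), _ = curve_fit(power_function, [4., 5.], [0.011, 0.067])
# p_lc, p_nt, p_vu = [power_function(x, a, b) for x in (1, 2, 3)]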
def make_empty_rate_df(species_list,rate_columns,status_label):
rate_df = pd.DataFrame(np.zeros((len(species_list),rate_columns+1)))
rate_df.columns = ['species']+ ['%s_p_ext_%i'%(status_label,i) for i in np.arange(0,rate_columns)]
rate_df.species = species_list
return rate_df
def add_arguments(parser):
parser.add_argument(
'--species_data',
required=True,
metavar='<path>',
help="File containing species list and current IUCN status of species, as well as generation length (GL) data estimates if available. GL data is only used for '--extinction_probs_mode 0' ('species_data.txt' output from get_iucn_data function).",
)
parser.add_argument(
'--iucn_history',
required=True,
metavar='<path>',
help="File containing IUCN history of the reference group for transition rate estimation ('*_iucn_history.txt' output of get_iucn_data function)."
)
parser.add_argument(
'--outdir',
required=True,
metavar='<path>',
help="Provide path to outdir where results will be saved."
)
parser.add_argument(
'--extinction_probs_mode',
default=0,
metavar='N',
help="Set to '0' to use the critE EX mode to determine extinction probabilities for each status (e.g. Mooers et al, 2008 approach). Set to '1' to use empirical EX mode, based on the recorded extinction in the IUCN history of the reference group (e.g. Monroe et al, 2019 approach). GL data can only be used in the critE EX mode ('0')."
)
parser.add_argument(
'--possibly_extinct_list',
default=0,
metavar='<path>',
help="File containing list of taxa that are likely extinct, but that are listed as extant in IUCN, including the year of their assessment as possibly extinct ('possibly_extinct_reference_taxa.txt' output from get_iucn_data function). These species will then be modeled as extinct by the esimate_rates function, which will effect the estimated extinction probabilities when chosing `--extinction_probs_mode 1`",
)
parser.add_argument(
'--species_specific_regression',
action='store_true',
help='Enables species-specific regression fitting to model LC, NT, and VU extinction probabilities. Only applicable with --extinction_probs_mode 0 (critE mode) and if GL is provided.',
default=False
)
parser.add_argument(
'--rate_samples',
default=100,
metavar='N',
help="How many rates to sample from the posterior transition rate estimates. These rates will be used to populate transition rate q-matrices for downstream simulations. Later on you can still chose to run more simulation replicates than the here specified number of produced transition rate q-matrices, in which case the `run_sim` function will randomely resample from the available q-matrices (default=100, this is ususally sufficient, larger numbers can lead to very high output file size volumes)."
)
parser.add_argument(
'--n_gen',
default=100000,
metavar='N',
help="Number of generations for MCMC for transition rate estimation (default=100000)."
)
parser.add_argument(
'--burnin',
default=1000,
metavar='N',
help="Burn-in for MCMC for transition rate estimation (default=1000)."
)
parser.add_argument(
'--seed',
default=None,
help="Set random seed for the MCMC."
)
def main(args):
# get user input___________________________________________________________
input_data = args.species_data
iucn_history = args.iucn_history
outdir = args.outdir
try:
extinction_probs_mode = int(args.extinction_probs_mode)
except:
        print('\nInvalid extinction_probs_mode provided. Please choose between the currently available options 0 or 1')
quit()
possibly_extinct_list = args.possibly_extinct_list
n_rep = int(args.rate_samples)
n_gen = int(args.n_gen)
burnin = int(args.burnin)
if not os.path.exists(outdir):
os.makedirs(outdir)
seed = args.seed
try:
random_seed = False
seed = int(seed)
except:
seed = np.random.randint(999999999)
random_seed = True
np.random.seed(seed)
np.savetxt(os.path.join(outdir,'starting_seed.txt'),np.array([seed]),fmt='%i')
# get input data
species_data_input = pd.read_csv(input_data,sep='\t',header=None).dropna()
invalid_status_taxa = species_data_input[~species_data_input.iloc[:,1].isin(['LC','NT','VU','EN','CR','DD','NE'])]
if len(invalid_status_taxa)>0:
print('\nFound invalid IUCN statuses:',list(invalid_status_taxa[1].values),'\n\nMake sure that the second column of your --species_data input contains the current IUCN status of your target species, which must be one of the following valid extant statuses: LC, NT, VU, EN, CR, DD, NE')
# if this effects only a minority of taxa, continue after removing these
if len(invalid_status_taxa)/len(species_data_input) < 0.5:
print('\nAutomatically dropping the following taxa because of invalid IUCN status information:', list(invalid_status_taxa[0].values))
species_data_input = species_data_input[species_data_input.iloc[:,1].isin(['LC','NT','VU','EN','CR','DD','NE'])]
else:
quit('\nPlease fix your species_data input file. Check presence of current IUCN status information and column order.')
# get the list of species
species_list = species_data_input.iloc[:,0].values.astype(str)
# replace underscores in species name in case they are present
species_list = np.array([i.replace('_',' ') for i in species_list])
# Check if all species names are binomial
for species in species_list:
if len(species.split(' ')) != 2:
            print('ERROR','*'*50,'\nABORTED: All species names provided under the --species_data flag must be binomial! Found non-binomial name:\n%s\n'%species,'*'*50)
quit()
# get the current IUCN status of all species
current_status = species_data_input.iloc[:,1].values.astype(str)
# get GL data if provided
gl_data_available = False
if species_data_input.shape[1] > 2:
gl_matrix = species_data_input.iloc[:,2:].values
gl_data_available = True
#__________________________________________________________________________
# process the IUCN history data____________________________________________
iucn_start_year = 2001 #start-year of the IUCN3.1 standard for categories
current_year = datetime.datetime.now().year
master_stat_time_df = pd.DataFrame(columns=['species']+list(np.arange(iucn_start_year,current_year+1).astype(str)))
statuses_through_time = pd.read_csv(iucn_history, delimiter = '\t')
target_columns = [column for column in master_stat_time_df.columns if column in statuses_through_time.columns]
master_stat_time_df[target_columns] = statuses_through_time[target_columns]
# treat EW as EX
master_stat_time_df.replace('EW', 'EX',inplace=True)
# replace occurrences of NR (not recognized) with nan
master_stat_time_df.replace('NR', np.nan,inplace=True)
# clean and sort df
master_stat_time_df = master_stat_time_df.sort_values(by='species')
master_stat_time_df = master_stat_time_df.drop_duplicates()
master_stat_time_df.index = np.arange(len(master_stat_time_df))
# set the assessment at the current year to NE for species without any assessments
na_row_indeces = np.where(master_stat_time_df.iloc[:,1:].T.isnull().all().values)
for index in na_row_indeces:
master_stat_time_df.iloc[index,-1] = 'NE'
# if possibly_extinct_list provided, read that list and set the status for those taxa to extinct, starting at provided year
if possibly_extinct_list:
pex_data = pd.read_csv(possibly_extinct_list,sep='\t')
pex_species_list = pex_data.iloc[:,0].values.astype(str)
pex_year = pex_data.iloc[:,1].values.astype(int)
column_names = master_stat_time_df.columns.values
row_names = master_stat_time_df.species.values
#df_selection = master_stat_time_df[master_stat_time_df.species.isin(pex_species_list)]
for i,species in enumerate(pex_species_list):
row_index = np.where(row_names==species)[0][0]
assessment_year = pex_year[i]
column_index = np.where(column_names==str(assessment_year))[0][0]
master_stat_time_df.iloc[row_index,column_index:] = 'EX'
# extract most recent valid status for each taxon
valid_status_dict,most_recent_status_dict,status_series,taxon_series = cust_func.extract_valid_statuses(master_stat_time_df)
    # extinction prob mode 0: remove all currently extinct taxa
if extinction_probs_mode == 0:
ext_indices = np.array([num for num,i in enumerate(most_recent_status_dict.keys()) if most_recent_status_dict[i] == 'EX'])
master_stat_time_df = master_stat_time_df.drop(ext_indices)
master_stat_time_df.index = np.arange(len(master_stat_time_df))
# replace any occurrence of 'EX' as a past status with NaN to avoid problems with counting types of transitions (treating these assessments as invalid)
master_stat_time_df.replace('EX', np.nan,inplace=True)
    # extinction prob mode 1: remove only taxa that have been extinct all along, keeping those that have a recorded transition to extinct within the time frame
elif extinction_probs_mode == 1:
ext_indices = np.array([num for num,i in enumerate(master_stat_time_df.iloc[:,1:].values.astype(str)) if 'EX' in np.unique(i) and len(np.unique(i))==2])
master_stat_time_df = master_stat_time_df.drop(ext_indices)
master_stat_time_df.index = np.arange(len(master_stat_time_df))
# write IUCN history df to file
master_stat_time_df.to_csv(os.path.join(outdir,'formatted_iucn_history_reference_taxa.txt'),sep='\t')
# extract most recent valid status for each taxon
valid_status_dict,most_recent_status_dict,status_series,taxon_series = cust_func.extract_valid_statuses(master_stat_time_df)
# count current status distribution
unique, counts = np.unique(status_series, return_counts=True)
print('\nCurrent IUCN status distribution in reference group:',dict(zip(unique, counts)))
# count how often each status change occurs
change_type_dict = cust_func.count_status_changes(master_stat_time_df,valid_status_dict)
    print('Summing up years spent in each category ...')
years_in_each_category = cust_func.get_years_spent_in_each_category(master_stat_time_df,valid_status_dict)
# write the status change data to file
final_years_count_array = np.array([list(years_in_each_category.keys()),list(years_in_each_category.values())]).T
np.savetxt(os.path.join(outdir,'years_spent_in_each_category.txt'),final_years_count_array,fmt='%s\t%s')
change_type_dict_array = np.array([list(change_type_dict.keys()),list(change_type_dict.values())]).T
np.savetxt(os.path.join(outdir,'change_type_dict.txt'),change_type_dict_array,fmt='%s\t%s')
#__________________________________________________________________________
# sample transition rates for all types of changes_________________________
if extinction_probs_mode == 0:
status_change_coutn_df = pd.DataFrame(data=np.zeros([6,6]).astype(int),index = ['LC','NT','VU','EN','CR','DD'],columns=['LC','NT','VU','EN','CR','DD'])
elif extinction_probs_mode == 1:
status_change_coutn_df = pd.DataFrame(data=np.zeros([7,7]).astype(int),index = ['LC','NT','VU','EN','CR','DD','EX'],columns=['LC','NT','VU','EN','CR','DD','EX'])
for status_change in change_type_dict.keys():
states = status_change.split('->')
original_state = states[0]
new_state = states[1]
count = change_type_dict[status_change]
status_change_coutn_df.loc[original_state,new_state] = count
status_change_coutn_df.to_csv(os.path.join(outdir,'status_change_counts.txt'),sep='\t',index=True)
print('Counted the following transition occurrences in IUCN history of reference group:')
print(status_change_coutn_df)
if not random_seed:
print('Running MCMC with user-set starting seed %i ...'%seed)
else:
        print('Running MCMC with randomly generated starting seed %i ...'%seed)
sampled_rates_df = pd.DataFrame(columns = ['status_change']+ ['rate_%i'%i for i in np.arange(0,n_rep)])
for status_a in status_change_coutn_df.columns:
row = status_change_coutn_df.loc[status_a]
for status_b in row.index.values:
if not status_a == status_b:
count = row[status_b]
total_time = years_in_each_category[status_a]
rates = sample_rate_mcmc(count, total_time, n_samples = n_rep, n_gen = n_gen, burnin = burnin)
sampled_rates_df = sampled_rates_df.append(pd.DataFrame(data=np.matrix(['%s->%s'%(status_a,status_b)]+list(rates)),columns = ['status_change']+ ['rate_%i'%i for i in np.arange(0,n_rep)]),ignore_index=True)
sampled_rates_df[['rate_%i'%i for i in np.arange(0,n_rep)]] = sampled_rates_df[['rate_%i'%i for i in np.arange(0,n_rep)]].apply(pd.to_numeric)
sampled_rates_df.to_csv(os.path.join(outdir,'sampled_status_change_rates.txt'),sep='\t',index=False,float_format='%.8f')
print('Sampled %i rates from MCMC posterior for each transition type.'%n_rep)
#__________________________________________________________________________
# if mode 0, calculate extinction probabilities for EN and CR with GL data_________________________
if extinction_probs_mode == 0:
# calculate yearly extinction risks for categories EN and CR
if gl_data_available:
dims = gl_matrix.shape[1]
en_risks = []
for gl_array in gl_matrix:
if dims == 1:
gl_array = np.array(gl_array)
#replace all nan values with the standard en extinction risk
en_risks_species = p_e_year(np.minimum(np.maximum([20]*len(gl_array),5*gl_array),100),0.2)
n_nan = len(en_risks_species[en_risks_species!=en_risks_species])
en_risks_species[en_risks_species!=en_risks_species] = [p_e_year(20,0.2)]*n_nan
en_risks.append(en_risks_species)
en_risks = np.array(en_risks)
else:
print('Warning: No generation length (GL) data found. Extinction risks for status EN and CR are calculated without using GL data.')
dims = 1
en_risks = np.array([[p_e_year(20,0.2)]]*len(species_list))
en_risks_df = make_empty_rate_df(species_list,dims,'EN')
en_risks_df.iloc[:,1:] = en_risks
en_risks_df.to_csv(os.path.join(outdir,'en_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
if gl_data_available:
dims = gl_matrix.shape[1]
cr_risks = []
for gl_array in gl_matrix:
if dims == 1:
gl_array = np.array(gl_array)
#replace all nan values with the standard en extinction risk
cr_risks_species = p_e_year(np.minimum(np.maximum([10]*len(gl_array),3*gl_array),100),0.5)
n_nan = len(cr_risks_species[cr_risks_species!=cr_risks_species])
cr_risks_species[cr_risks_species!=cr_risks_species] = [p_e_year(10,0.5)]*n_nan
cr_risks.append(cr_risks_species)
cr_risks = np.array(cr_risks)
else:
dims = 1
cr_risks = np.array([[p_e_year(10,0.5)]]*len(species_list))
cr_risks_df = make_empty_rate_df(species_list,dims,'CR')
cr_risks_df.iloc[:,1:] = cr_risks
cr_risks_df.to_csv(os.path.join(outdir,'cr_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
if args.species_specific_regression:
# make regression for all other categories based on EN and CR risks
print('Fitting species-specific regression function to determine LC, NT, and VU extinction probabilities ...')
vu_risks_df = make_empty_rate_df(species_list,dims,'VU')
nt_risks_df = make_empty_rate_df(species_list,dims,'NT')
lc_risks_df = make_empty_rate_df(species_list,dims,'LC')
for i,species in enumerate(cr_risks_df.species.values):
en_risks = en_risks_df.iloc[i,1:].values
cr_risks = cr_risks_df.iloc[i,1:].values
vu_risks = []
nt_risks = []
lc_risks = []
for j,_ in enumerate(en_risks):
en_prob = en_risks[j]
cr_prob = cr_risks[j]
x = [4.,5.]
y = [en_prob,cr_prob]
# fitting the power function to the 2 data points of each species (EN and CR risk)
with warnings.catch_warnings():
# this is to avoid printing the warning from curve_fit when trying to fit function to only 2 points: "OptimizeWarning: Covariance of the parameters could not be estimated"
warnings.filterwarnings("ignore")
a_b = curve_fit(power_function,x,y);
# extracting the values for a and b from the curve fit function
a = a_b[0][0]
b = a_b[0][1]
# get values for LC, NT, and VU
p_year_LC = power_function(1,a,b)
p_year_NT = power_function(2,a,b)
p_year_VU = power_function(3,a,b)
vu_risks.append(p_year_VU)
nt_risks.append(p_year_NT)
lc_risks.append(p_year_LC)
vu_risks_df.iloc[vu_risks_df[vu_risks_df.species == species].index.values[0],1:] = np.array(vu_risks)
nt_risks_df.iloc[nt_risks_df[nt_risks_df.species == species].index.values[0],1:] = np.array(nt_risks)
lc_risks_df.iloc[lc_risks_df[lc_risks_df.species == species].index.values[0],1:] = np.array(lc_risks)
vu_risks_df.to_csv(os.path.join(outdir,'vu_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
nt_risks_df.to_csv(os.path.join(outdir,'nt_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
lc_risks_df.to_csv(os.path.join(outdir,'lc_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
#__________________________________________________________________________
# populate q-matrices______________________________________________________
print("\nPopulating species-specific q-matrices ...")
sampled_rates_df.index = sampled_rates_df.status_change.values
if extinction_probs_mode == 0:
transition_rates = sampled_rates_df.iloc[:,1:]
        # randomly sample cr and en extinction probs to be used in q-matrices.
if n_rep <= dims:
sample_columns = np.random.choice(np.arange(dims),size=n_rep,replace=False)
# since there are only as many cr and en p(ex) estimates as there are provided GL values, we may have to resample some (but make sure all are present at least once)
else:
sample_columns1 = np.random.choice(np.arange(dims),size=dims,replace=False)
            sample_columns2 = np.random.choice(np.arange(dims))
import math
import numpy as np
import random
class Task(object):
def __init__(self, robot=None):
"""Init of the Task
        Args:
            robot: instance of the robot the task is evaluated on (stored via reset())
"""
self.starting_command = np.array([1., 0., 0.])
self.sample = Sample_Command()
self.reset(robot)
self.desired_lv = 0.25
self.angle = 0
self.foot_position = np.zeros((3,12))
self.idx = -1
self.r_lv = 0
self.r_av = 0
self.r_s = 0
self.r_br = 0
self.r_bp = 0
self.r_t = 0
def reset(self, robot, command_mode=1):
"""Initializes a new instance of the robot and resets the
desired velocity"""
self.robot = robot
self.command = self.starting_command
# self.command[0] = random.uniform(0,1)
# self.command[1] = random.uniform(-1.0, 1.0)
# print(self.command)
# self.sample.reset(command_mode)
# 3 conditioned
def set_desired_yaw_rate(self, yaw_rate):
"""Sets a new desired yaw rate"""
self.command[1] = yaw_rate
def change_desired_yaw_rate(self, change):
self.command[1] += change
self.command[1] = min(max(self.command[1],-1),1)
# print(self.command[2])
def change_desired_forward_velocity(self, change):
self.command[0] += change
self.command[0] = min(max(self.command[0],0),1)
def enable_command(self):
self.stop_command = False
def get_desired_velocities_and_yaw_rate(self):
"""Get desired direction of the robot CoM and yaw rate"""
# self.command = self.sample.sample_command(
# self.robot.get_base_position()[:2],
# self.robot.get_base_roll_pitch_yaw()[2],
# 1)
# print(self.command)
return self.command
def stop(self, bool):
self.stop_command = bool
def get_reward_distribution(self):
r_lv, r_av, r_s, r_br, r_bp, r_t = self.r_lv, self.r_av, self.r_s, self.r_br, self.r_bp, self.r_t
self.r_lv, self.r_av, self.r_s, self.r_br, self.r_bp, self.r_t = 0, 0, 0, 0, 0, 0
return r_lv, r_av, r_s, r_br, r_bp, r_t
# MY REWARD
def get_reward(self, measurements, action):
"""Get the reward for the current time step
Args:
measurements: The robot's current filtered x,y velocity, roll rate,
pitch rate, and yaw rate.
Returns:
float: reward obtained in the current time step
"""
# print(self.command)
# MY LINEAR VELOCITY
v_pr = np.dot(measurements[0], self.command[0])
if v_pr > self.desired_lv:
r_lv = 1
else:
r_lv = 0.5*math.exp(-15*((measurements[0]-self.command[0]*self.desired_lv)**2))+\
0.5*math.exp(-15*((measurements[1]-self.command[1]*self.desired_lv)**2))
# MY ANGULAR REWARD
        v_ar = np.dot(measurements[4], self.command[2])
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 8 12:00:11 2017
@author: prmiles
"""
import numpy as np
import sys
from .utilities.progressbar import progress_bar
from .utilities.general import check_settings
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors as mplcolor
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.interpolate import interp1d
def calculate_intervals(chain, results, data, model, s2chain=None,
nsample=500, waitbar=True, sstype=0):
'''
Calculate distribution of model response to form propagation intervals
Samples values from chain, performs forward model evaluation, and
tabulates credible and prediction intervals (if obs. error var. included).
Args:
* **chain** (:class:`~numpy.ndarray`): Parameter chains, expect
shape=(nsimu, npar).
* **results** (:py:class:`dict`): Results dictionary generated by
pymcmcstat.
* **data** (:class:`~.DataStructure`): Data
* **model**: User defined function. Note, if your model outputs
multiple quantities of interest (QoI) at the same time in a
multi-dimensional array, then make sure it is returned as a
(N, p) array where N is the number of evaluation points and
p is the number of QoI.
Kwargs:
* **s2chain** (:py:class:`float`, :class:`~numpy.ndarray`, or None):
Observation error variance chain.
* **nsample** (:py:class:`int`): No. of samples drawn from posteriors.
* **waitbar** (:py:class:`bool`): Flag to display progress bar.
* **sstype** (:py:class:`int`): Sum-of-squares type. Can be 0 (normal),
1 (sqrt), or 2 (log).
Returns:
* :py:class:`dict` with two elements: 1) `credible` and 2) `prediction`
'''
parind = results['parind']
q = results['theta']
nsimu, npar = chain.shape
s2chain = check_s2chain(s2chain, nsimu)
iisample, nsample = define_sample_points(nsample, nsimu)
if waitbar is True:
__wbarstatus = progress_bar(iters=int(nsample))
ci = []
pi = []
multiple = False
for kk, isa in enumerate(iisample):
# progress bar
if waitbar is True:
__wbarstatus.update(kk)
# extract chain set
q[parind] = chain[kk, :]
# evaluate model
y = model(q, data)
# check model output
if y.ndim == 2:
nrow, ncol = y.shape
if nrow != y.size and ncol != y.size:
multiple = True
if multiple is False:
# store model prediction in credible intervals
ci.append(y.reshape(y.size,)) # store model output
if s2chain is None:
continue
else:
# estimate prediction intervals
s2 = s2chain[kk]
obs = observation_sample(s2, y, sstype)
pi.append(obs.reshape(obs.size,))
else:
# Model output contains multiple QoI
# Expect ncol = No. of QoI
if kk == 0:
cis = []
pis = []
for jj in range(ncol):
cis.append([])
pis.append([])
for jj in range(ncol):
# store model prediction in credible intervals
cis[jj].append(y[:, jj]) # store model output
if s2chain is None:
continue
else:
# estimate prediction intervals
if s2chain.ndim == 2:
if s2chain.shape[1] == ncol:
s2 = s2chain[kk, jj]
else:
s2 = s2chain[kk]
else:
s2 = s2chain[kk]
obs = observation_sample(s2, y[:, jj], sstype)
pis[jj].append(obs.reshape(obs.size,))
if multiple is False:
# Setup output
credible = np.array(ci)
if s2chain is None:
prediction = None
else:
prediction = np.array(pi)
return dict(credible=credible,
prediction=prediction)
else:
# Setup output for multiple QoI
out = []
for jj in range(ncol):
credible = np.array(cis[jj])
if s2chain is None:
prediction = None
else:
prediction = np.array(pis[jj])
out.append(dict(credible=credible,
prediction=prediction))
return out
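# Usage sketch (chain, results, data, model and s2chain are placeholders for objects
# produced by an MCMC run, not fixed API names): tabulate the model response
# distribution and, if an error-variance chain is supplied, prediction intervals.
# intervals = calculate_intervals(chain, results, data, model,
#                                 s2chain=s2chain, nsample=500, waitbar=True)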
# --------------------------------------------
def plot_intervals(intervals, time, ydata=None, xdata=None,
limits=[95],
adddata=None, addmodel=True, addlegend=True,
addcredible=True, addprediction=True,
data_display={}, model_display={}, interval_display={},
fig=None, figsize=None, legloc='upper left',
ciset=None, piset=None,
return_settings=False):
'''
Plot propagation intervals in 2-D
This routine takes the model distributions generated using the
:func:`~calculate_intervals` method and then plots specific
quantiles. The user can plot just the intervals, or also include the
median model response and/or observations. Specific settings for
credible intervals are controlled by defining the `ciset` dictionary.
Likewise, for prediction intervals, settings are defined using `piset`.
The setting options available for each interval are as follows:
- `limits`: This should be a list of numbers between 0 and 100, e.g.,
`limits=[50, 90]` will result in 50% and 90% intervals.
- `cmap`: The program is designed to "try" to choose colors that
are visually distinct. The user can specify the colormap to choose
from.
- `colors`: The user can specify the color they would like for each
interval in a list, e.g., ['r', 'g', 'b']. This list should have
the same number of elements as `limits` or the code will revert
back to its default behavior.
Args:
* **intervals** (:py:class:`dict`): Interval dictionary generated
using :meth:`calculate_intervals` method.
* **time** (:class:`~numpy.ndarray`): Independent variable, i.e.,
x-axis of plot
Kwargs:
* **ydata** (:class:`~numpy.ndarray` or None): Observations, expect
1-D array if defined.
* **xdata** (:class:`~numpy.ndarray` or None): Independent values
corresponding to observations. This is required if the observations
do not align with your times of generating the model response.
* **limits** (:py:class:`list`): Quantile limits that correspond to
percentage size of desired intervals. Note, this is the default
limits, but specific limits can be defined using the `ciset` and
`piset` dictionaries.
* **adddata** (:py:class:`bool`): Flag to include data
* **addmodel** (:py:class:`bool`): Flag to include median model
response
* **addlegend** (:py:class:`bool`): Flag to include legend
* **addcredible** (:py:class:`bool`): Flag to include credible
intervals
* **addprediction** (:py:class:`bool`): Flag to include prediction
intervals
* **model_display** (:py:class:`dict`): Display settings for median
model response
* **data_display** (:py:class:`dict`): Display settings for data
* **interval_display** (:py:class:`dict`): General display settings
for intervals.
* **fig**: Handle of previously created figure object
* **figsize** (:py:class:`tuple`): (width, height) in inches
* **legloc** (:py:class:`str`): Legend location - matplotlib help for
details.
* **ciset** (:py:class:`dict`): Settings for credible intervals
* **piset** (:py:class:`dict`): Settings for prediction intervals
* **return_settings** (:py:class:`bool`): Flag to return ciset and
piset along with fig and ax.
Returns:
* (:py:class:`tuple`) with elements
1) Figure handle
2) Axes handle
3) Dictionary with `ciset` and `piset` inside (only
outputted if `return_settings=True`)
'''
# unpack dictionary
credible = intervals['credible']
prediction = intervals['prediction']
# Check user-defined settings
ciset = __setup_iset(ciset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
piset = __setup_iset(piset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
# Check limits
ciset['limits'] = _check_limits(ciset['limits'], limits)
piset['limits'] = _check_limits(piset['limits'], limits)
# convert limits to ranges
ciset['quantiles'] = _convert_limits(ciset['limits'])
piset['quantiles'] = _convert_limits(piset['limits'])
# setup display settings
interval_display, model_display, data_display = setup_display_settings(
interval_display, model_display, data_display)
# Define colors
ciset['colors'] = setup_interval_colors(ciset, inttype='ci')
piset['colors'] = setup_interval_colors(piset, inttype='pi')
# Define labels
ciset['labels'] = _setup_labels(ciset['limits'], inttype='CI')
piset['labels'] = _setup_labels(piset['limits'], inttype='PI')
if fig is None:
fig = plt.figure(figsize=figsize)
ax = fig.gca()
time = time.reshape(time.size,)
# add prediction intervals
if addprediction is True:
for ii, quantile in enumerate(piset['quantiles']):
pi = generate_quantiles(prediction, np.array(quantile))
ax.fill_between(time, pi[0], pi[1], facecolor=piset['colors'][ii],
label=piset['labels'][ii], **interval_display)
# add credible intervals
if addcredible is True:
for ii, quantile in enumerate(ciset['quantiles']):
ci = generate_quantiles(credible, np.array(quantile))
ax.fill_between(time, ci[0], ci[1], facecolor=ciset['colors'][ii],
label=ciset['labels'][ii], **interval_display)
# add model (median model response)
if addmodel is True:
ci = generate_quantiles(credible, np.array(0.5))
ax.plot(time, ci, **model_display)
# add data to plot
if ydata is not None and adddata is None:
adddata = True
if adddata is True and ydata is not None:
if xdata is None:
ax.plot(time, ydata, **data_display)
else:
ax.plot(xdata, ydata, **data_display)
# add legend
if addlegend is True:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc=legloc)
if return_settings is True:
return fig, ax, dict(ciset=ciset, piset=piset)
else:
return fig, ax
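# Usage sketch of the interval settings described in the docstring above (limits and
# colors are illustrative): ciset/piset override the default `limits` per interval type.
# ciset = dict(limits=[50, 90, 95], colors=['#c7e9b4', '#41b6c4', '#225ea8'])
# piset = dict(limits=[95])
# fig, ax = plot_intervals(intervals, time, ydata=ydata, ciset=ciset, piset=piset)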
# --------------------------------------------
def plot_3d_intervals(intervals, time, ydata=None, xdata=None,
limits=[95],
adddata=False, addlegend=True,
addmodel=True, figsize=None, model_display={},
data_display={}, interval_display={},
addcredible=True, addprediction=True,
fig=None, legloc='upper left',
ciset=None, piset=None,
return_settings=False):
'''
Plot propagation intervals in 3-D
This routine takes the model distributions generated using the
:func:`~calculate_intervals` method and then plots specific
quantiles. The user can plot just the intervals, or also include the
median model response and/or observations. Specific settings for
credible intervals are controlled by defining the `ciset` dictionary.
Likewise, for prediction intervals, settings are defined using `piset`.
The setting options available for each interval are as follows:
- `limits`: This should be a list of numbers between 0 and 100, e.g.,
`limits=[50, 90]` will result in 50% and 90% intervals.
- `cmap`: The program is designed to "try" to choose colors that
are visually distinct. The user can specify the colormap to choose
from.
- `colors`: The user can specify the color they would like for each
interval in a list, e.g., ['r', 'g', 'b']. This list should have
the same number of elements as `limits` or the code will revert
back to its default behavior.
Args:
* **intervals** (:py:class:`dict`): Interval dictionary generated
using :meth:`calculate_intervals` method.
* **time** (:class:`~numpy.ndarray`): Independent variable, i.e.,
x- and y-axes of plot. Note, it must be a 2-D array with
shape=(N, 2), where N is the number of evaluation points.
Kwargs:
* **ydata** (:class:`~numpy.ndarray` or None): Observations, expect
1-D array if defined.
* **xdata** (:class:`~numpy.ndarray` or None): Independent values
corresponding to observations. This is required if the observations
do not align with your times of generating the model response.
* **limits** (:py:class:`list`): Quantile limits that correspond to
percentage size of desired intervals. Note, this is the default
limits, but specific limits can be defined using the `ciset` and
`piset` dictionaries.
* **adddata** (:py:class:`bool`): Flag to include data
* **addmodel** (:py:class:`bool`): Flag to include median model
response
* **addlegend** (:py:class:`bool`): Flag to include legend
* **addcredible** (:py:class:`bool`): Flag to include credible
intervals
* **addprediction** (:py:class:`bool`): Flag to include prediction
intervals
* **model_display** (:py:class:`dict`): Display settings for median
model response
* **data_display** (:py:class:`dict`): Display settings for data
* **interval_display** (:py:class:`dict`): General display settings
for intervals.
* **fig**: Handle of previously created figure object
* **figsize** (:py:class:`tuple`): (width, height) in inches
* **legloc** (:py:class:`str`): Legend location - matplotlib help for
details.
* **ciset** (:py:class:`dict`): Settings for credible intervals
* **piset** (:py:class:`dict`): Settings for prediction intervals
* **return_settings** (:py:class:`bool`): Flag to return ciset and
piset along with fig and ax.
Returns:
* (:py:class:`tuple`) with elements
1) Figure handle
2) Axes handle
3) Dictionary with `ciset` and `piset` inside (only
outputted if `return_settings=True`)
'''
# unpack dictionary
credible = intervals['credible']
prediction = intervals['prediction']
# Check user-defined settings
ciset = __setup_iset(ciset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
piset = __setup_iset(piset,
default_iset=dict(
limits=limits,
cmap=None,
colors=None))
# Check limits
ciset['limits'] = _check_limits(ciset['limits'], limits)
piset['limits'] = _check_limits(piset['limits'], limits)
# convert limits to ranges
ciset['quantiles'] = _convert_limits(ciset['limits'])
piset['quantiles'] = _convert_limits(piset['limits'])
# setup display settings
interval_display, model_display, data_display = setup_display_settings(
interval_display, model_display, data_display)
# Define colors
ciset['colors'] = setup_interval_colors(ciset, inttype='ci')
piset['colors'] = setup_interval_colors(piset, inttype='pi')
# Define labels
ciset['labels'] = _setup_labels(ciset['limits'], inttype='CI')
piset['labels'] = _setup_labels(piset['limits'], inttype='PI')
if fig is None:
fig = plt.figure(figsize=figsize)
ax = Axes3D(fig)
ax = fig.gca()
time1 = time[:, 0]
time2 = time[:, 1]
# add prediction intervals
if addprediction is True:
for ii, quantile in enumerate(piset['quantiles']):
pi = generate_quantiles(prediction, np.array(quantile))
# Add a polygon instead of fill_between
rev = np.arange(time1.size - 1, -1, -1)
x = np.concatenate((time1, time1[rev]))
y = np.concatenate((time2, time2[rev]))
z = np.concatenate((pi[0], pi[1][rev]))
verts = [list(zip(x, y, z))]
surf = Poly3DCollection(verts,
color=piset['colors'][ii],
label=piset['labels'][ii])
# Add fix for legend compatibility
surf._facecolors2d = surf._facecolors3d
surf._edgecolors2d = surf._edgecolors3d
ax.add_collection3d(surf)
# add credible intervals
if addcredible is True:
for ii, quantile in enumerate(ciset['quantiles']):
ci = generate_quantiles(credible, np.array(quantile))
# Add a polygon instead of fill_between
rev = np.arange(time1.size - 1, -1, -1)
x = np.concatenate((time1, time1[rev]))
y = np.concatenate((time2, time2[rev]))
z = np.concatenate((ci[0], ci[1][rev]))
verts = [list(zip(x, y, z))]
surf = Poly3DCollection(verts,
color=ciset['colors'][ii],
label=ciset['labels'][ii])
# Add fix for legend compatibility
surf._facecolors2d = surf._facecolors3d
surf._edgecolors2d = surf._edgecolors3d
ax.add_collection3d(surf)
# add model (median model response)
if addmodel is True:
ci = generate_quantiles(credible, np.array(0.5))
ax.plot(time1, time2, ci, **model_display)
# add data to plot
if ydata is not None and adddata is None:
adddata = True
if adddata is True:
if xdata is None:
ax.plot(time1, time2, ydata.reshape(time1.shape), **data_display)
else: # User provided xdata array for observation points
ax.plot(xdata[:, 0], xdata[:, 1],
ydata.reshape(time1.shape), **data_display)
# add legend
if addlegend is True:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc=legloc)
if return_settings is True:
return fig, ax, dict(ciset=ciset, piset=piset)
else:
return fig, ax
def check_s2chain(s2chain, nsimu):
'''
Check size of s2chain
Args:
* **s2chain** (:py:class:`float`, :class:`~numpy.ndarray`, or `None`):
Observation error variance chain or value
* **nsimu** (:py:class:`int`): No. of elements in chain
Returns:
* **s2chain** (:class:`~numpy.ndarray` or `None`)
'''
if s2chain is None:
return None
else:
if isinstance(s2chain, float):
s2chain = np.ones((nsimu,))*s2chain
if s2chain.ndim == 2:
if s2chain.shape[0] != nsimu:
s2chain = s2chain * np.ones((nsimu, s2chain.size))
else:
if s2chain.size != nsimu: # scalars provided for multiple QoI
s2chain = s2chain * np.ones((nsimu, s2chain.size))
return s2chain
# --------------------------------------------
def observation_sample(s2, y, sstype):
'''
Calculate model response with observation errors.
Args:
* **s2** (:class:`~numpy.ndarray`): Observation error(s).
* **y** (:class:`~numpy.ndarray`): Model responses.
* **sstype** (:py:class:`int`): Flag to specify sstype.
Returns:
* **opred** (:class:`~numpy.ndarray`): Model responses with observation errors.
'''
if sstype == 0:
opred = y + np.random.standard_normal(y.shape) * np.sqrt(s2)
elif sstype == 1: # sqrt
opred = (np.sqrt(y) + np.random.standard_normal(y.shape) * np.sqrt(s2))**2
elif sstype == 2: # log
opred = y*np.exp(np.random.standard_normal(y.shape) * np.sqrt(s2))
else:
sys.exit('Unknown sstype')
return opred
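# Small illustrative example: with sstype=0 the returned sample is the model response
# plus Gaussian noise of variance s2 (sstype=1 and 2 apply the same idea to the
# sqrt- and log-transformed response).
# opred = observation_sample(0.01, np.linspace(0., 1., 5), 0)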
# --------------------------------------------
def define_sample_points(nsample, nsimu):
'''
Define indices to sample from posteriors.
Args:
* **nsample** (:py:class:`int`): Number of samples to draw from posterior.
* **nsimu** (:py:class:`int`): Number of MCMC simulations.
Returns:
* **iisample** (:class:`~numpy.ndarray`): Array of indices in posterior set.
* **nsample** (:py:class:`int`): Number of samples to draw from posterior.
'''
# define sample points
if nsample >= nsimu:
iisample = range(nsimu) # sample all points from chain
nsample = nsimu
else:
# randomly sample from chain
iisample = np.ceil(np.random.rand(nsample)*nsimu) - 1
iisample = iisample.astype(int)
return iisample, nsample
# --------------------------------------------
def generate_quantiles(x, p=np.array([0.25, 0.5, 0.75])):
'''
Calculate empirical quantiles.
Args:
* **x** (:class:`~numpy.ndarray`): Observations from which to generate quantile.
* **p** (:class:`~numpy.ndarray`): Quantile limits.
Returns:
* (:class:`~numpy.ndarray`): Interpolated quantiles.
'''
# extract number of rows/cols from np.array
n = x.shape[0]
# define vector valued interpolation function
xpoints = np.arange(0, n, 1)
    interpfun = interp1d(xpoints, np.sort(x, 0))
import numpy as np
from baselines import util
import os
import copy
import nltk
#import crf
import scipy.special
import sklearn
class HMM:
"""
Hidden Markov Model
"""
def __init__(self, n, m):
"""
fix n, m
:param n: number of states
:param m: number of observations
"""
self.n = n
self.m = m
self.t = np.zeros((n, n))
self.e = np.zeros((n, m))
self.start = np.asarray([1.0 / n] * n)
def pr_obs(self, i, list_features, t=None):
"""
:param i: state
:param list_features:
:param t: time, not used here
:return: probability of observing the features in state i
"""
res = 1
for f in list_features:
res *= self.e[i, f]
return res
def decode(self, a, include_crowd_obs=False):
"""
Viterbi decoding
:param a: seq of observations, each observation is a list of features
:return:
"""
l = len(a)
if l == 0:
return []
# c[t][i] = prob of best path time t, at state i
c = np.zeros((l, self.n))
c[0] = np.copy(self.start) # * self.e[:, a[0]]
# print self.n, c.shape
for i in range(self.n):
c[0][i] *= self.pr_obs(i, a[0])
# b[t][i] = backpointer
b = np.zeros((l, self.n))
for t in range(1, l, 1): # time
ob = a[t]
for i in range(self.n): # current state
for j in range(self.n): # previous state
# todo: change to log scale
p = c[t - 1][j] * self.t[j, i] * self.pr_obs(i, ob)
if include_crowd_obs:
p *= self.pr_crowd_labs(t, i, self.current_list_cl)
# print t, i, j, p
if p > c[t][i]:
c[t][i] = p
b[t][i] = j
# normalise otherwise p ends up as zeros with long sequences
c_t_total = 0
for i in range(self.n):
c_t_total += c[t][i]
for i in range(self.n):
c[t][i] /= c_t_total
res = np.zeros((l,))
# trace
p = 0
for i in range(self.n):
if c[l - 1][i] > p:
p = c[l - 1][i]
res[l - 1] = i
seq_prob = p
for t in range(l - 2, -1, -1):
res[t] = b[int(t + 1), int(res[t + 1])]
# print c
# print b
return res, seq_prob
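    # Usage sketch (toy numbers): with n=2 states and m=3 observation features,
    # decode() returns the Viterbi state sequence and a score for that best path.
    # hmm = HMM(2, 3)
    # hmm.t = np.array([[0.9, 0.1], [0.2, 0.8]])
    # hmm.e = np.array([[0.7, 0.2, 0.1], [0.1, 0.3, 0.6]])
    # states, score = hmm.decode([[0], [1], [2]])  # each obs is a list of feature ids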
def learn(self, sentences, smooth=0.001):
"""
learn parameters from labeled data
:param sentences: list of sentence, which is list of instance
:return:
"""
# counting
self.t = smooth * np.ones((self.n, self.n))
self.e = smooth * np.ones((self.n, self.m))
        self.start = smooth * np.ones((self.n,))
import os
import sys
import obspy
import scipy
import pyasdf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.fftpack import next_fast_len
from obspy.signal.filter import bandpass
from seisgo import noise, stacking,utils
import pygmt as gmt
from obspy import UTCDateTime
def plot_eventsequence(cat,figsize=(12,4),ytype='magnitude',figname=None,
yrange=None,save=False,stem=True):
if isinstance(cat,obspy.core.event.catalog.Catalog):
cat=pd.DataFrame(utils.qml2list(cat))
elif isinstance(cat,list):
cat=pd.DataFrame(cat)
#All magnitudes greater than or equal to the limit will be plotted
plt.figure(figsize=figsize)
plt.title(ytype+" vs. time")
plt.xlabel("Date (UTC)")
plt.ylabel(ytype)
if yrange is not None:
ymin,ymax=yrange
if ytype.lower()=="magnitude":
cat2=cat[(cat.magnitude>=yrange[0]) & (cat.magnitude<=yrange[1]) ]
elif ytype.lower()=="depth":
cat2=cat[(cat.depth>=yrange[0]) & (cat.depth<=yrange[1]) ]
else:
cat2=cat
if ytype.lower()=="magnitude":
ymin=np.min(cat2.magnitude)*0.9
ymax=np.max(cat2.magnitude)*1.1
elif ytype.lower()=="depth":
ymin=np.min(cat2.depth)*0.9
ymax=np.max(cat2.depth)*1.1
t=[]
for i in range(len(cat2)):
tTime=obspy.UTCDateTime(cat2.iloc[i]["datetime"])
t.append(tTime.datetime)
if stem:
if ytype.lower()=="magnitude":
markerline, stemlines, baseline=plt.stem(t,cat2.magnitude,linefmt='k-',markerfmt="o",
bottom=ymin)
elif ytype.lower()=="depth":
markerline, stemlines, baseline=plt.stem(t,cat2.depth,linefmt='k-',markerfmt="o",
bottom=ymin)
markerline.set_markerfacecolor('r')
markerline.set_markeredgecolor('r')
else:
if ytype.lower()=="magnitude":
plt.scatter(t,cat2.magnitude,5,'k')
elif ytype.lower()=="depth":
plt.scatter(t,cat2.depth,cat2.magnitude,'k')
#
plt.grid(axis="both")
plt.ylim([ymin,ymax])
if save:
if figname is not None:
plt.savefig(figname)
else:
plt.savefig(ytype+"_vs_time.png")
else:
plt.show()
def plot_stations(lon,lat,region,markersize="c0.2c",title="station map",style="fancy",figname=None,
format='png',distance=None,projection="M5i", xshift="6i",frame="af"):
"""
    lon, lat: could be list of vectors containing multiple sets of stations. The number of sets must be the same
    as the length of the markersize list.
    markersize: a list specifying the symbol style for each station set.
region: [minlon,maxlon,minlat,maxlat] for map view
"""
nsta=len(lon)
if isinstance(markersize,str):
markersize=[markersize]*nsta
fig = gmt.Figure()
gmt.config(MAP_FRAME_TYPE=style)
for i in range(nsta):
if i==0:
fig.coast(region=region, resolution="f",projection=projection, rivers='rivers',
water="cyan",frame=frame,land="white",
borders=["1/0.5p,gray,2/1p,gray"])
fig.basemap(frame='+t"'+title+'"')
fig.plot(
x=lon[i],
y=lat[i],
style=markersize[i],
color="red",
)
if figname is None:
figname='stationmap.'+format
fig.savefig(figname)
print('plot was saved to: '+figname)
##plot power spectral density
def plot_psd(data,dt,labels=None,xrange=None,cmap='jet',normalize=True,figsize=(13,5),\
save=False,figname=None,tick_inc=None):
"""
    Plot the power spectral density of the data array.
    =PARAMETERS=
    data: 2-D array containing the data. The data to be plotted should be on axis 1 (second dimension)
    dt: sampling interval in time.
    labels: row labels of the data, default is None.
    cmap: colormap, default is 'jet'
    xrange: frequency range to display, default is None (show the full frequency range)
normalize: whether normalize the PSD in plotting, default is True
figsize: figure size, default: (13,5)
"""
data=np.array(data)
if data.ndim > 2:
        raise ValueError('only plot 1-d array or 2d matrix for now. the input data has a dimension of %d'%(data.ndim))
f,psd=utils.psd(data,1/dt)
f=f[1:]
plt.figure(figsize=figsize)
ax=plt.subplot(111)
if data.ndim==2:
nwin=data.shape[0]
if tick_inc is None:
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
psdN=np.ndarray((psd.shape[0],psd.shape[1]-1))
for i in range(psd.shape[0]):
if normalize: psdN[i,:]=psd[i,1:]/np.max(np.abs(psd[i,1:]))
else: psdN[i,:]=psd[i,1:]
plt.imshow(psdN,aspect='auto',extent=[f.min(),f.max(),psdN.shape[0],0],cmap=cmap)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
if labels is not None: ax.set_yticklabels(labels[0:nwin:tick_inc])
if normalize: plt.colorbar(label='normalized PSD')
else: plt.colorbar(label='PSD')
else:
if normalize: psdN=psd[1:]/np.max(np.abs(psd[1:]))
        else: psdN=psd[1:]
plt.plot(f,psdN)
if xrange is None:plt.xlim([f[1],f[-1]])
else:
plt.xlim(xrange)
plt.xscale('log')
plt.xlabel('frequency (Hz)')
plt.title('PSD')
if save:
if figname is not None:
plt.savefig(figname)
else:
plt.savefig("PSD.png")
else:
plt.show()
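# Usage sketch (data2d and window_labels are placeholder names): plot normalised PSDs
# of a 2-D set of windows sampled at 20 Hz, restricted to 0.01-10 Hz.
# plot_psd(data2d, dt=0.05, labels=window_labels, xrange=[0.01, 10], normalize=True)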
#############################################################################
############### PLOTTING RAW SEISMIC WAVEFORMS ##########################
#############################################################################
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_waveform(sfile,net,sta,freqmin,freqmax,save=False,figdir=None,format='pdf'):
'''
display the downloaded waveform for station A
PARAMETERS:
-----------------------
    sfile: file containing all waveform data for a time-chunk in ASDF format
net,sta,comp: network, station name and component
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
USAGE:
-----------------------
plot_waveform('temp.h5','CI','BLC',0.01,0.5)
'''
# open pyasdf file to read
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
sta_list = ds.waveforms.list()
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# check whether station exists
tsta = net+'.'+sta
if tsta not in sta_list:
raise ValueError('no data for %s in %s'%(tsta,sfile))
tcomp = ds.waveforms[tsta].get_waveform_tags()
ncomp = len(tcomp)
if ncomp==0:
print('no data found for the specified net.sta.')
return None
tr = ds.waveforms[tsta][tcomp[0]]
dt = tr[0].stats.delta
npts = tr[0].stats.npts
tt = np.arange(0,npts)*dt
if ncomp == 1:
data = tr[0].data
data = bandpass(data,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
fig=plt.figure(figsize=(9,3))
plt.plot(tt,data,'k-',linewidth=1)
plt.title('T\u2080:%s %s.%s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,tcomp[0].split('_')[0].upper(),freqmin,freqmax))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.tight_layout()
plt.show()
else:
data = np.zeros(shape=(ncomp,npts),dtype=np.float32)
for ii in range(ncomp):
data[ii] = ds.waveforms[tsta][tcomp[ii]][0].data
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
fig=plt.figure(figsize=(9,6))
for c in range(ncomp):
if c==0:
plt.subplot(ncomp,1,1)
plt.plot(tt,data[0],'k-',linewidth=1)
plt.title('T\u2080:%s %s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,freqmin,freqmax))
plt.legend([tcomp[0].split('_')[0].upper()],loc='upper left')
plt.xlabel('Time [s]')
else:
plt.subplot(ncomp,1,c+1)
plt.plot(tt,data[c],'k-',linewidth=1)
plt.legend([tcomp[c].split('_')[0].upper()],loc='upper left')
plt.xlabel('Time [s]')
fig.tight_layout()
if save:
if not os.path.isdir(figdir):os.mkdir(figdir)
sfilebase=sfile.split('/')[-1]
outfname = figdir+'/{0:s}_{1:s}.{2:s}'.format(sfilebase.split('.')[0],net,sta)
fig.savefig(outfname+'.'+format, format=format, dpi=300)
plt.close()
else:
fig.show()
#############################################################################
###############PLOTTING XCORR RESULTS AS THE OUTPUT OF SEISGO ##########################
#############################################################################
def plot_xcorr_substack(sfile,freqmin,freqmax,lag=None,comp='ZZ',
save=True,figdir=None):
'''
display the 2D matrix of the cross-correlation functions for a certain time chunk.
PARAMETERS:
--------------------------
sfile: cross-correlation functions output by the SeisGo workflow
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_xcorr_substack('temp.h5',0.1,1,100,True,'./')
Note: IMPORTANT!!!! this script only works for cross-correlation with sub-stacks being set to True in S1.
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
spairs = ds.auxiliary_data.list()
path_lists = ds.auxiliary_data[spairs[0]].list()
flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# only works for cross-correlation with substacks generated
if not flag:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# lags for display
if not lag:lag=maxlag
lag0=np.min([1.0*lag,maxlag])
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
# t is the time labels for plotting
if lag>=5:
tstep=int(int(lag)/5)
t1=np.arange(-int(lag),0,step=tstep)
t2=np.arange(0,int(lag+0.5*tstep),step=tstep)
t=np.concatenate((t1,t2))
else:
tstep=lag/5
t1=np.arange(-lag,0,step=tstep)
t2=np.arange(0,lag+0.5*tstep,step=tstep)
t=np.concatenate((t1,t2))
indx1 = int((maxlag-lag0)/dt)
indx2 = indx1+2*int(lag0/dt)+1
for spair in spairs:
ttr = spair.split('_')
net1,sta1 = ttr[0].split('.')
net2,sta2 = ttr[1].split('.')
path_lists = ds.auxiliary_data[spair].list()
for ipath in path_lists:
chan1,chan2 = ipath.split('_')
cc_comp=chan1[-1]+chan2[-1]
if cc_comp == comp or comp=='all' or comp=='ALL':
try:
dist = ds.auxiliary_data[spair][ipath].parameters['dist']
ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
ttime= ds.auxiliary_data[spair][ipath].parameters['time']
except Exception:
print('continue! something wrong with %s %s'%(spair,ipath))
continue
# cc matrix
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
# print(data.shape)
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
if nwin==0 or len(ngood)==1: print('continue! not enough substacks!');continue
tmarks = []
data_normalizd=data
# load cc for each station-pair
for ii in range(nwin):
data[ii] = bandpass(data[ii],freqmin,freqmax,1/dt,corners=4, zerophase=True)
data[ii] = data[ii]-np.mean(data[ii])
amax[ii] = np.max(np.abs(data[ii]))
data_normalizd[ii] = data[ii]/amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
tmarks.append(obspy.UTCDateTime(ttime[ii]).strftime('%Y-%m-%dT%H:%M:%S'))
dstack_mean=np.mean(data,axis=0)
dstack_robust=stacking.robust_stack(data)[0]
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(5,1,(1,3))
ax.matshow(data_normalizd,cmap='seismic',extent=[-lag0,lag0,nwin,0],aspect='auto')
ax.plot((0,0),(nwin,0),'k-')
ax.set_title('%s.%s.%s %s.%s.%s dist:%5.2fkm' % (net1,sta1,chan1,net2,sta2,chan2,dist))
ax.set_xlabel('time [s]')
ax.set_xticks(t)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
# ax.set_yticklabels(np.arange(0,nwin,step=tick_inc))
ax.set_yticklabels(tmarks[0:nwin:tick_inc])
ax.set_xlim([-lag,lag])
ax.xaxis.set_ticks_position('bottom')
ax1 = fig.add_subplot(5,1,(4,5))
ax1.set_title('stack at %4.2f-%4.2f Hz'%(freqmin,freqmax))
tstack=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tstack)>len(dstack_mean):tstack=tstack[:-1]
ax1.plot(tstack,dstack_mean,'b-',linewidth=1,label='mean')
ax1.plot(tstack,dstack_robust,'r-',linewidth=1,label='robust')
ax1.set_xlabel('time [s]')
ax1.set_xticks(t)
ax1.set_xlim([-lag,lag])
ylim=ax1.get_ylim()
ax1.plot((0,0),ylim,'k-')
ax1.set_ylim(ylim)
ax1.legend(loc='upper right')
ax1.grid()
# ax2 = fig.add_subplot(414)
# ax2.plot(amax/min(amax),'r-')
# ax2.plot(ngood,'b-')
# ax2.set_xlabel('waveform number')
# ax2.set_xticks(np.arange(0,nwin,step=tick_inc))
# ax2.set_xticklabels(tmarks[0:nwin:tick_inc])
# #for tick in ax[2].get_xticklabels():
# # tick.set_rotation(30)
# ax2.legend(['relative amp','ngood'],loc='upper right')
fig.tight_layout()
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+\
'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}_{6:s}-{7:s}Hz.png'.format(net1,sta1,\
chan1,net2,\
sta2,chan2,
str(freqmin),str(freqmax))
fig.savefig(outfname, format='png', dpi=400)
print('saved to: '+outfname)
plt.close()
else:
fig.show()
def plot_corrfile(sfile,freqmin,freqmax,lag=None,comp='ZZ',
save=True,figname=None,format='png',figdir=None):
'''
display the 2D matrix of the cross-correlation functions for a certain time chunk.
PARAMETERS:
--------------------------
sfile: cross-correlation functions output by the SeisGo workflow
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_corrfile('temp.h5',0.1,1,100,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
corrdict=noise.extract_corrdata(sfile,comp=comp)
clist=list(corrdict.keys())
for c in clist:
corr=corrdict[c]
if comp in list(corr.keys()):
corr[comp].plot(freqmin=freqmin,freqmax=freqmax,lag=lag,save=save,figdir=figdir,
figname=figname,format=format)
def plot_corrdata(corr,freqmin=None,freqmax=None,lag=None,save=False,figdir=None,figsize=(10,8)):
'''
display the 2D matrix of the cross-correlation functions for a certain time chunk.
PARAMETERS:
--------------------------
corr: : class:`~seisgo.types.CorrData`
CorrData object containing the correlation functions and the metadata.
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_corrdata(corr,0.1,1,100,save=True,figdir='./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
netstachan1 = corr.net[0]+'.'+corr.sta[0]+'.'+corr.loc[0]+'.'+corr.chan[0]
netstachan2 = corr.net[1]+'.'+corr.sta[1]+'.'+corr.loc[1]+'.'+corr.chan[1]
dt,maxlag,dist,ngood,ttime,substack = [corr.dt,corr.lag,corr.dist,corr.ngood,corr.time,corr.substack]
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
lag0=np.min([1.0*lag,maxlag])
# t is the time labels for plotting
if lag>=5:
tstep=int(int(lag)/5)
t1=np.arange(-int(lag),0,step=tstep);t2=np.arange(0,int(lag+0.5*tstep),step=tstep)
t=np.concatenate((t1,t2))
else:
tstep=lag/5
t1=np.arange(-lag,0,step=tstep);t2=np.arange(0,lag+0.5*tstep,step=tstep)
t=np.concatenate((t1,t2))
indx1 = int((maxlag-lag0)/dt);indx2 = indx1+2*int(lag0/dt)+1
# cc matrix
if substack:
data = corr.data[:,indx1:indx2]
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
# print(data.shape)
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
if nwin==0 or len(ngood)==1:
print('continue! not enough traces to plot!')
return
tmarks = []
data_normalizd=data
# load cc for each station-pair
for ii in range(nwin):
if freqmin is not None and freqmax is not None:
data[ii] = bandpass(data[ii],freqmin,freqmax,1/dt,corners=4, zerophase=True)
data[ii] = data[ii]-np.mean(data[ii])
amax[ii] = np.max(np.abs(data[ii]))
data_normalizd[ii] = data[ii]/amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
tmarks.append(obspy.UTCDateTime(ttime[ii]).strftime('%Y-%m-%dT%H:%M:%S'))
dstack_mean=np.mean(data,axis=0)
# dstack_robust=stack.robust_stack(data)[0]
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(6,1,(1,4))
ax.matshow(data_normalizd,cmap='seismic',extent=[-lag0,lag0,nwin,0],aspect='auto')
ax.plot((0,0),(nwin,0),'k-')
if freqmin is not None and freqmax is not None:
ax.set_title('%s-%s : dist : %5.2f km : %4.2f-%4.2f Hz' % (netstachan1,netstachan2,
dist,freqmin,freqmax))
else:
ax.set_title('%s-%s : dist : %5.2f km : unfiltered' % (netstachan1,netstachan2,dist))
ax.set_xlabel('time [s]')
ax.set_xticks(t)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
ax.set_yticklabels(tmarks[0:nwin:tick_inc])
ax.set_xlim([-lag,lag])
ax.xaxis.set_ticks_position('bottom')
ax1 = fig.add_subplot(6,1,(5,6))
if freqmin is not None and freqmax is not None:
ax1.set_title('stack at %4.2f-%4.2f Hz'%(freqmin,freqmax))
else:
ax1.set_title('stack: unfiltered')
tstack=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tstack)>len(dstack_mean):tstack=tstack[:-1]
ax1.plot(tstack,dstack_mean,'b-',linewidth=1,label='mean')
# ax1.plot(tstack,dstack_robust,'r-',linewidth=1,label='robust')
ax1.set_xlabel('time [s]')
ax1.set_xticks(t)
ax1.set_xlim([-lag,lag])
ylim=ax1.get_ylim()
ax1.plot((0,0),ylim,'k-')
ax1.set_ylim(ylim)
ax1.legend(loc='upper right')
ax1.grid()
fig.tight_layout()
else: #only one trace available
data = corr.data[indx1:indx2]
# load cc for each station-pair
if freqmin is not None and freqmax is not None:
data = bandpass(data,freqmin,freqmax,1/dt,corners=4, zerophase=True)
data = data-np.mean(data)
amax = np.max(np.abs(data))
data /= amax
timestamp = obspy.UTCDateTime(ttime)
tmarks=obspy.UTCDateTime(ttime).strftime('%Y-%m-%dT%H:%M:%S')
tx=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tx)>len(data):tx=tx[:-1]
plt.figure(figsize=figsize)
ax=plt.gca()
plt.plot(tx,data,'k-',linewidth=1)
if freqmin is not None and freqmax is not None:
plt.title('%s-%s : dist : %5.2f km : %4.2f-%4.2f Hz' % (netstachan1,netstachan2,
dist,freqmin,freqmax))
else:
plt.title('%s-%s : dist : %5.2f km : unfiltered' % (netstachan1,netstachan2,dist))
plt.xlabel('time [s]')
plt.xticks(t)
ylim=ax.get_ylim()
plt.plot((0,0),ylim,'k-')
plt.ylim(ylim)
plt.xlim([-lag,lag])
ax.grid()
# save figure or just show
if save:
if figdir==None:figdir = '.'  # default output directory
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+\
'/{0:s}_{1:s}_{2:s}-{3:s}Hz.png'.format(netstachan1,netstachan2,
str(freqmin),str(freqmax))
plt.savefig(outfname, format='png', dpi=300)
print('saved to: '+outfname)
plt.close()
else:
plt.show()
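# Example (sketch): the same plot can be produced from a correlation file by first extracting
# CorrData objects, as done in plot_corrfile above; the file name and pair key are placeholders.
# corrdict = noise.extract_corrdata('PAIR_XX.AAA_XX.BBB.h5', comp='ZZ')
# cdata = corrdict[list(corrdict.keys())[0]]['ZZ']
# plot_corrdata(cdata, freqmin=0.1, freqmax=1.0, lag=100, save=False)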
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_xcorr_substack_spect(sfile,freqmin,freqmax,lag=None,save=True,figdir='./'):
'''
display the amplitude spectrum of the cross-correlation functions for a time chunk.
PARAMETERS:
-----------------------
sfile: cross-correlation functions output by S1
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
-----------------------
plot_xcorr_substack_spect('temp.h5',0.1,1,200,True,'./')
Note: IMPORTANT!!!! this script only works for the cross-correlation with sub-stacks in S1.
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
spairs = ds.auxiliary_data.list()
path_lists = ds.auxiliary_data[spairs[0]].list()
flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# only works for cross-correlation with substacks generated
if not flag:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
nfft = int(next_fast_len(indx2-indx1))
freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
for spair in spairs:
ttr = spair.split('_')
net1,sta1 = ttr[0].split('.')
net2,sta2 = ttr[1].split('.')
for ipath in path_lists:
chan1,chan2 = ipath.split('_')
try:
dist = ds.auxiliary_data[spair][ipath].parameters['dist']
ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
ttime= ds.auxiliary_data[spair][ipath].parameters['time']
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
except Exception:
print('continue! something wrong with %s %s'%(spair,ipath))
continue
# cc matrix
data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
if nwin==0 or len(ngood)==1: print('continue! not enough substacks!');continue
# load cc for each station-pair
for ii in range(nwin):
spec[ii] = scipy.fftpack.fft(data[ii],nfft,axis=0)[:nfft//2]
spec[ii] /= np.max(np.abs(spec[ii]),axis=0)
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = max(data[ii])
data[ii] /= amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig,ax = plt.subplots(3,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s.%s.%s %s.%s.%s dist:%5.2f km' % (net1,sta1,chan1,net2,sta2,chan2,dist))
ax[0].set_xlabel('time [s]')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:-1:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
ax[1].set_xlabel('freq [Hz]')
ax[1].set_ylabel('amplitudes')
ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[1].xaxis.set_ticks_position('bottom')
ax[2].plot(amax/min(amax),'r-')
ax[2].plot(ngood,'b-')
ax[2].set_xlabel('waveform number')
#ax[1].set_xticks(np.arange(0,nwin,int(nwin/5)))
ax[2].legend(['relative amp','ngood'],loc='upper right')
fig.tight_layout()
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}.pdf'.format(net1,sta1,chan1,net2,sta2,chan2)
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
#############################################################################
###############PLOTTING THE POST-STACKING XCORR FUNCTIONS AS OUTPUT OF S2 STEP IN NOISEPY ##########################
#############################################################################
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_substack_all(sfile,freqmin,freqmax,comp,lag=None,save=False,figdir=None):
'''
display the 2D matrix of the cross-correlation functions stacked for all time windows.
PARAMETERS:
---------------------
sfile: cross-correlation functions output by S2
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
comp: cross component of the targeted cc functions
USAGE:
----------------------
plot_substack_all('temp.h5',0.1,1,'ZZ',50,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
paths = comp
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
dtype_lists = ds.auxiliary_data.list()
dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
if len(dtype_lists)==1:
raise ValueError('Abort! seems no substacks have been done')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
# other parameters to keep
nwin = len(dtype_lists)-1
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
ngood= np.zeros(nwin,dtype=np.int16)
ttime= np.zeros(nwin,dtype=int)
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
amax = np.zeros(nwin,dtype=np.float32)
for ii,itype in enumerate(dtype_lists[2:]):
timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
try:
ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
#timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# cc matrix
data[ii] = ds.auxiliary_data[itype][paths].data[indx1:indx2]
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = np.max(data[ii])
data[ii] /= amax[ii]
except Exception as e:
print(e);continue
if len(ngood)==1:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# plotting
if nwin>100:
tick_inc = int(nwin/10)
elif nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig,ax = plt.subplots(2,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s dist:%5.2f km filtered at %4.2f-%4.2fHz' % (sfile.split('/')[-1],dist,freqmin,freqmax))
ax[0].set_xlabel('time [s]')
ax[0].set_ylabel('waveforms')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].plot(amax/max(amax),'r-')
ax[1].plot(ngood,'b-')
ax[1].set_xlabel('waveform number')
ax[1].set_xticks(np.arange(0,nwin,nwin//5))
ax[1].legend(['relative amp','ngood'],loc='upper right')
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}_{1:4.2f}_{2:4.2f}Hz.pdf'.format(sfile.split('/')[-1],freqmin,freqmax)
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_substack_all_spect(sfile,freqmin,freqmax,comp,lag=None,save=False,figdir=None):
'''
display the amplitude spectrum of the cross-correlation functions stacked for all time windows.
PARAMETERS:
-----------------------
sfile: cross-correlation functions output by S2
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
comp: cross component of the targeted cc functions
USAGE:
-----------------------
plot_substack_all('temp.h5',0.1,1,'ZZ',50,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
paths = comp
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
dtype_lists = ds.auxiliary_data.list()
dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
if len(dtype_lists)==1:
raise ValueError('Abort! seems no substacks have been done')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
nfft = int(next_fast_len(indx2-indx1))
freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
# other parameters to keep
nwin = len(dtype_lists)-1
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
ngood= np.zeros(nwin,dtype=np.int16)
ttime= np.zeros(nwin,dtype=int)
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
amax = np.zeros(nwin,dtype=np.float32)
for ii,itype in enumerate(dtype_lists[1:]):
timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
try:
ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
#timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# cc matrix
tdata = ds.auxiliary_data[itype][paths].data[indx1:indx2]
spec[ii] = scipy.fftpack.fft(tdata,nfft,axis=0)[:nfft//2]
spec[ii] /= np.max(np.abs(spec[ii]))
data[ii] = bandpass(tdata,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = np.max(data[ii])
data[ii] /= amax[ii]
except Exception as e:
print(e);continue
if len(ngood)==1:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# plotting
tick_inc = 50
fig,ax = plt.subplots(3,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s dist:%5.2f km' % (sfile.split('/')[-1],dist))
ax[0].set_xlabel('time [s]')
ax[0].set_ylabel('waveforms')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
ax[1].set_xlabel('freq [Hz]')
ax[1].set_ylabel('amplitudes')
ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[1].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[1].xaxis.set_ticks_position('bottom')
ax[2].plot(amax/max(amax),'r-')
ax[2].plot(ngood,'b-')
ax[2].set_xlabel('waveform number')
ax[2].set_xticks(np.arange(0,nwin,nwin//15))
ax[2].legend(['relative amp','ngood'],loc='upper right')
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}.pdf'.format(sfile.split('/')[-1])
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
'''
Modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_xcorr_moveout_heatmap(sfiles,sta,dtype,freq,comp,dist_inc,lag=None,save=False,\
figsize=None,format='png',figdir=None):
'''
display the moveout (2D matrix) of the cross-correlation functions stacked for all time chunks.
PARAMETERS:
---------------------
sfile: cross-correlation functions output by S2
sta: station name as the virtual source.
dtype: datatype either 'Allstack_pws' or 'Allstack_linear'
freq: frequency band [freqmin, freqmax] to filter and plot, or a list of such bands (up to 9)
comp: cross component
dist_inc: distance bins to stack over
lag: lag times for displaying
save: set True to save the figures (in pdf format)
figdir: desired directory to save the figure (if not provided, save to default dir)
USAGE:
----------------------
plot_xcorr_moveout_heatmap('temp.h5','sta','Allstack_pws',0.1,0.2,1,'ZZ',200,True,'./temp')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
if not isinstance(freq[0],list):freq=[freq]
freq=np.array(freq)
figlabels=['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)','(i)']
if freq.shape[0]>9:
raise ValueError('freq includes more than 9 (maximum allowed for now) elements!')
elif freq.shape[0]==9:
subplot=[3,3]
figsize0=[14,7.5]
elif freq.shape[0] >=7 and freq.shape[0] <=8:
subplot=[2,4]
figsize0=[18,10]
elif freq.shape[0] >=5 and freq.shape[0] <=6:
subplot=[2,3]
figsize0=[14,7.5]
elif freq.shape[0] ==4:
subplot=[2,2]
figsize0=[10,6]
else:
subplot=[1,freq.shape[0]]
if freq.shape[0]==3:
figsize0=[13,3]
elif freq.shape[0]==2:
figsize0=[8,3]
else:
figsize0=[4,3]
if figsize is None:figsize=figsize0
path = comp
receiver = sta+'.h5'
stack_method = dtype.split('_')[-1]
# extract common variables
try:
ds = pyasdf.ASDFDataSet(sfiles[0],mpi=False,mode='r')
dt = ds.auxiliary_data[dtype][path].parameters['dt']
maxlag= ds.auxiliary_data[dtype][path].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfiles[0]);sys.exit()
# lags for display
if lag is None:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=(int(2*int(lag)/4)))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
# cc matrix
nwin = len(sfiles)
data0 = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
dist = np.zeros(nwin,dtype=np.float32)
ngood= np.zeros(nwin,dtype=np.int16)
# load cc and parameter matrix
for ii in range(len(sfiles)):
sfile = sfiles[ii]
treceiver = sfile.split('_')[-1]
ds = pyasdf.ASDFDataSet(sfile,mpi=False,mode='r')
try:
# load data to variables
dist[ii] = ds.auxiliary_data[dtype][path].parameters['dist']
ngood[ii]= ds.auxiliary_data[dtype][path].parameters['ngood']
tdata = ds.auxiliary_data[dtype][path].data[indx1:indx2]
if treceiver == receiver: tdata=np.flip(tdata,axis=0)
except Exception:
print("continue! cannot read %s "%sfile);continue
data0[ii] = tdata
ntrace = int(np.round(np.max(dist)+0.51)/dist_inc)
fig=plt.figure(figsize=figsize)
for f in range(len(freq)):
freqmin=freq[f][0]
freqmax=freq[f][1]
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
for i2 in range(data0.shape[0]):
data[i2]=bandpass(data0[i2],freqmin,freqmax,1/dt,corners=4, zerophase=True)
# average cc
ndata = np.zeros(shape=(ntrace,indx2-indx1),dtype=np.float32)
ndist = np.zeros(ntrace,dtype=np.float32)
for td in range(ndata.shape[0]):
tindx = np.where((dist>=td*dist_inc)&(dist<(td+1)*dist_inc))[0]
if len(tindx):
ndata[td] = np.mean(data[tindx],axis=0)
ndist[td] = (td+0.5)*dist_inc
# normalize waveforms
indx = np.where(ndist>0)[0]
ndata = ndata[indx]
ndist = ndist[indx]
for ii in range(ndata.shape[0]):
# print(ii,np.max(np.abs(ndata[ii])))
ndata[ii] /= np.max(np.abs(ndata[ii]))
# plotting figures
ax=fig.add_subplot(subplot[0],subplot[1],f+1)
ax.matshow(ndata,cmap='seismic',extent=[-lag,lag,ndist[-1],ndist[0]],aspect='auto')
ax.set_title('%s %s stack %s %5.3f-%5.2f Hz'%(figlabels[f],sta,stack_method,freqmin,freqmax))
ax.set_xlabel('time [s]')
ax.set_ylabel('distance [km]')
ax.set_xticks(t)
ax.xaxis.set_ticks_position('bottom')
#ax.text(np.ones(len(ndist))*(lag-5),dist[ndist],ngood[ndist],fontsize=8)
plt.tight_layout()
# save figure or show
if save:
outfname = figdir+'/moveout_'+sta+'_heatmap_'+str(stack_method)+'_'+str(dist_inc)+'kmbin_'+comp+'.'+format
plt.savefig(outfname, format=format, dpi=300)
plt.close()
else:
plt.show()
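# Example (sketch): moveout heatmap for several frequency bands at once; the file pattern,
# station name, and output directory below are hypothetical placeholders.
# sfiles = glob.glob('STACK/*/linear_*.h5')
# plot_xcorr_moveout_heatmap(sfiles, 'CI.BLC', 'Allstack_linear',
#                            freq=[[0.1, 0.2], [0.2, 0.5], [0.5, 1.0]],
#                            comp='ZZ', dist_inc=1, lag=200,
#                            save=True, figdir='./figures')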
#test functions
def plot_xcorr_moveout_wiggle(sfiles,sta,dtype,freq,ccomp=None,scale=1.0,lag=None,\
ylim=None,save=False,figsize=None,figdir=None,format='png',minsnr=None):
'''
display the moveout waveforms of the cross-correlation functions stacked for all time chunks.
PARAMETERS:
---------------------
sfile: cross-correlation functions output by S2
sta: source station name
dtype: datatype either 'Allstack0pws' or 'Allstack0linear'
freq: frequency band [freqmin, freqmax] to filter and plot, or a list of such bands
ccomp: x-correlation component names, could be a string or a list of strings.
scale: plot the waveforms with scaled amplitudes
lag: lag times for displaying
save: set True to save the figures (in pdf format)
figdir: desired directory to save the figure (if not provided, save to default dir)
minsnr: minimum SNR as a QC criterion, the SNR is computed as max(abs(trace))/mean(abs(trace)),
without signal and noise windows.
USAGE:
----------------------
plot_xcorr_moveout_wiggle('temp.h5','Allstack0pws',0.1,0.2,'ZZ',200,True,'./temp')
'''
if not isinstance(freq[0],list):freq=[freq]
freq = np.array(freq)
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
try:
from QGL import *
from QGL import config as QGLconfig
from QGL.BasicSequences.helpers import create_cal_seqs, delay_descriptor, cal_descriptor
except:
print("Could not find QGL")
import auspex.config as config
from auspex.log import logger
from copy import copy, deepcopy
# from adapt.refine import refine_1D
import os
import uuid
import pandas as pd
import networkx as nx
import scipy as sp
import subprocess
import zmq
import json
import datetime
from copy import copy
import time
import bbndb
from auspex.filters import DataBuffer
from .qubit_exp import QubitExperiment
from .pulse_calibration import Calibration, CalibrationExperiment
from . import pipeline
from auspex.parameter import FloatParameter
from auspex.filters.plot import ManualPlotter
from auspex.analysis.fits import *
from auspex.analysis.CR_fits import *
from auspex.analysis.qubit_fits import *
from auspex.analysis.helpers import normalize_buffer_data
from matplotlib import cm
from scipy.optimize import curve_fit, minimize
import numpy as np
from itertools import product
from collections import OrderedDict
from collections.abc import Iterable
available_optimizers = ['SCIPY']
try:
from bayes_opt import BayesianOptimization
available_optimizers += ['BAYES']
except ImportError:
logger.info("Could not import BayesianOptimization package.")
try:
import cma
available_optimizers += ['CMA']
except ImportError:
logger.info("Could not import pyCMA optimization package.")
class CloseEnough(Exception):
pass
class QubitOptimizer(Calibration):
"""
Class for running an optimization over Auspex experiments.
"""
def __init__(self, qubits, sequence_function, cost_function,
initial_parameters=None, other_variables=None,
optimizer="scipy", optim_params=None, min_cost = None,
output_nodes=None, stream_selectors=None, do_plotting=True, **kwargs):
"""Setup an optimization over qubit experiments.
Args:
qubits: The qubit(s) that the optimization is run over.
sequence_function: A function of the form
`sequence_function(*qubits, **params)`
that returns a valid QGL sequence for the qubits and initial
parameters.
cost_function: The objective function for the optimization. The input
for this function comes from the filter pipeline node specified
in `output_nodes` or inferred from the qubits (may not be
reliable!). This function is responsible for choosing the
appropriate quadrature as necessary.
initial_parameters: A dict of initial parameters for `sequence_function`.
other_variables: A dict of other Auspex qubit experiment variables
(not associated with sequence generation) as keys and initial
parameters as values. Example:
`{"q1 control frequency": 5.4e9, "q2 measure frequency": 6.7e9}`
optimizer: String which chooses the optimization function. Supported
values are: "scipy" for scipy.optimize.minimize, "bayes" for
the BayesianOptimization package, and "cma" for pyCMA.
optim_params: Dict of keyword arguments to be passed to the
optimization function.
min_cost: Minimum value of cost function, optional.
"""
self.qubits = list(qubits) if isinstance(qubits, Iterable) else [qubits]
self.sequence_function = sequence_function
self.cost_function = cost_function
self.optimizer = optimizer.upper()
self.optim_params = optim_params
self.output_nodes = output_nodes if isinstance(output_nodes, Iterable) else [output_nodes]
self.stream_selectors = stream_selectors
self.do_plotting = do_plotting
self.cw_mode = False
self.leave_plots_open = True
self.axis_descriptor = None
self.succeeded = False
self.norm_points = False
self.kwargs = kwargs
self.plotters = []
self.fake_data = []
self.sample = None
self.metafile = None
self.costs = []
self.fake = False
self.niterations = 0
self.min_cost = min_cost
if initial_parameters:
self.initial_parameters = OrderedDict(initial_parameters)
self.recompile = True
else:
self.initial_parameters = {}
self.recompile = False
if other_variables:
self.other_variables = OrderedDict(other_variables)
else:
self.other_variables = None
self.seq_params = self.initial_parameters
self.other_params = self.other_variables
self.param_history = OrderedDict({k: [] for k in self.parameters().keys()})
self.bounds = OrderedDict({})
super().__init__()
if self.optimizer not in available_optimizers:
raise ValueError(f"Unknown optimizer: {self.optimizer}. Availabe are: {available_optimizers}")
def init_plots(self):
plot1 = ManualPlotter("Objective", x_label="Iteration", y_label="Value")
plot1.add_data_trace("Objective", {'color': 'C1'})
self.plot1 = plot1
plot2 = ManualPlotter("Paramters", x_label="Iteration", y_label="Value")
for idx, key in enumerate(self.parameters().keys()):
plot2.add_data_trace(key, {'color': f'C{idx}'})
self.plot2 = plot2
return [plot1, plot2]
def update_plots(self):
iters = np.array(range(1,self.niterations+1))
self.plot1['Objective'] = (iters, np.array(self.costs))
for k, v in self.param_history.items():
self.plot2[k] = (iters, np.array(v))
#!/usr/bin/env python
"""
Generic python script.
"""
__author__ = "<NAME>"
import glob
import os
import numpy as np
import healpy as hp
import scipy.stats
import scipy.interpolate
import scipy.ndimage
import simple_adl.query_dl
import simple_adl.projector
#-------------------------------------------------------------------------------
# From https://github.com/DarkEnergySurvey/ugali/blob/master/ugali/utils/healpix.py
def superpixel(subpix, nside_subpix, nside_superpix):
"""
Return the indices of the super-pixels which contain each of the sub-pixels.
"""
if nside_subpix==nside_superpix: return subpix
theta, phi = hp.pix2ang(nside_subpix, subpix)
return(hp.ang2pix(nside_superpix, theta, phi))
def subpixel(superpix, nside_superpix, nside_subpix):
"""
Return the indices of sub-pixels (resolution nside_subpix) within
the super-pixel (resolution nside_superpix).
ADW: It would be better to convert to next and do this explicitly
"""
if nside_superpix==nside_subpix: return superpix
vec = hp.pix2vec(nside_superpix, superpix)
radius = np.degrees(2. * hp.max_pixrad(nside_superpix))
subpix = hp.query_disc(nside_subpix, vec, np.radians(radius))
pix_for_subpix = superpixel(subpix,nside_subpix,nside_superpix)
# Might be able to speed up array indexing...
return(subpix[pix_for_subpix == superpix])
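# Example (sketch): relating two HEALPix resolutions, e.g. nside=32 super-pixels and
# nside=512 sub-pixels; the pixel index 1000 is an arbitrary illustration.
# sub = subpixel(1000, nside_superpix=32, nside_subpix=512)   # all nside=512 pixels inside super-pixel 1000
# sup = superpixel(sub, nside_subpix=512, nside_superpix=32)  # maps each of them back to 1000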
#-------------------------------------------------------------------------------
class Survey():
"""
Class to handle survey-specific parameters.
"""
def __init__(self, iterable=(), **kwargs):
self.__dict__.update(iterable, **kwargs)
self.mag_1 = self.catalog['mag'].format(self.band_1)
self.mag_2 = self.catalog['mag'].format(self.band_2)
self.mag_dered_1 = self.catalog['mag_dered'].format(self.band_1)
self.mag_dered_2 = self.catalog['mag_dered'].format(self.band_2)
self.mag_err_1 = self.catalog['mag_err'].format(self.band_1)
self.mag_err_2 = self.catalog['mag_err'].format(self.band_2)
self.load_fracdet
@property
def load_fracdet(self):
"""
Load-in the fracdet map if it exists.
"""
#if self.survey['fracdet']:
# print('Reading fracdet map {} ...'.format(self.survey['fracdet']))
# fracdet = ugali.utils.healpix.read_map(self.survey['fracdet'])
#else:
# print('No fracdet map specified ...')
# fracdet = None
##return(fracdet)
#self.fracdet = fracdet
# SM: Commenting this out until I have a fracdet map to debug with
self.fracdet = None
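# Example (sketch): a Survey is built from a config dictionary whose keys mirror the attributes
# accessed in this module; the concrete values below are hypothetical.
# survey = Survey(band_1='g', band_2='r',
#                 catalog={'nside': 32, 'mag_max': 23.5, 'profile': 'delve_dr1',
#                          'mag': 'mag_{}', 'mag_dered': 'mag_dered_{}', 'mag_err': 'mag_err_{}',
#                          'basis_1': 'ra', 'basis_2': 'dec'})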
#-------------------------------------------------------------------------------
class Region():
"""
Class to handle regions.
"""
def __init__(self, survey, ra, dec):
self.survey = survey
self.nside = self.survey.catalog['nside']
self.fracdet = self.survey.fracdet
self.ra = ra
self.dec = dec
self.proj = simple_adl.projector.Projector(self.ra, self.dec)
self.pix_center = hp.ang2pix(self.nside, self.ra, self.dec, lonlat=True)
def load_data(self, stars=True, galaxies=False):
# SM: to query the equivalent of hp.get_all_neighbors() for nside=32,
# choose a radius of 3 deg:
#>>> np.sqrt((1/np.pi)*8*hp.nside2pixarea(nside=32, degrees=True))
#2.9238630046262855
data = simple_adl.query_dl.query(self.survey.catalog['profile'], self.ra, self.dec, radius=3.0, gmax=self.survey.catalog['mag_max'], stars=stars, galaxies=galaxies)
self.data = data
def characteristic_density(self, iso_sel):
"""
Compute the characteristic density of a region
Convolve the field and find overdensity peaks
"""
x, y = self.proj.sphereToImage(self.data[self.survey.catalog['basis_1']][iso_sel], self.data[self.survey.catalog['basis_2']][iso_sel]) # Trimmed magnitude range for hotspot finding
#x_full, y_full = proj.sphereToImage(data[basis_1], data[basis_2]) # If we want to use full magnitude range for significance evaluation
delta_x = 0.01
area = delta_x**2
smoothing = 2. / 60. # Was 3 arcmin
bins = np.arange(-8., 8. + 1.e-10, delta_x)
from . import DATA_DIR
import sys
import glob
from .background_systems import BackgroundSystemModel
from .export import ExportInventory
from inspect import currentframe, getframeinfo
from pathlib import Path
from scipy import sparse
import csv
import itertools
import numexpr as ne
import numpy as np
import xarray as xr
REMIND_FILES_DIR = DATA_DIR / "IAM"
class InventoryCalculation:
"""
Build and solve the inventory for results characterization and inventory export
Vehicles to be analyzed can be filtered by passing a `scope` dictionary.
Some assumptions in the background system can also be adjusted by passing a `background_configuration` dictionary.
.. code-block:: python
scope = {
'powertrain':['BEV', 'FCEV', 'ICEV-p'],
}
bc = {'country':'CH', # considers electricity network losses for Switzerland
'custom electricity mix' : [[1,0,0,0,0,0,0,0,0,0], # in this case, 100% hydropower for the first year
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
], # in this case, 100% nuclear for the second year
'fuel blend':{
'cng':{ #specify fuel blend for compressed gas
'primary fuel':{
'type':'biogas',
'share':[0.9, 0.8, 0.7, 0.6] # shares per year. Must total 1 for each year.
},
'secondary fuel':{
'type':'syngas',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'diesel':{
'primary fuel':{
'type':'synthetic diesel',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'biodiesel - cooking oil',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'petrol':{
'primary fuel':{
'type':'petrol',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'bioethanol - wheat straw',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'hydrogen':{
'primary fuel':{'type':'electrolysis', 'share':[1, 0, 0, 0]},
'secondary fuel':{'type':'smr - natural gas', 'share':[0, 1, 1, 1]}
}
},
'energy storage': {
'electric': {
'type':'NMC',
'origin': 'NO'
},
'hydrogen': {
'type':'carbon fiber'
}
}
}
InventoryCalculation(CarModel.array,
background_configuration=background_configuration,
scope=scope,
scenario="RCP26")
The `custom electricity mix` key in the background_configuration dictionary defines an electricity mix to apply,
in the form of one or several array(s), depending on the number of years to analyze,
that should total 1, of which the indices correspond to:
- [0]: hydro-power
- [1]: nuclear
- [2]: natural gas
- [3]: solar power
- [4]: wind power
- [5]: biomass
- [6]: coal
- [7]: oil
- [8]: geothermal
- [9]: waste incineration
If none is given, the electricity mix corresponding to the country specified in `country` will be selected.
If no country is specified, Europe applies.
The `primary` and `secondary` fuel keys contain an array with shares of alternative petrol fuel for each year, to create a custom blend.
If none is provided, a blend provided by the Integrated Assessment model REMIND is used, which will depend on the REMIND energy scenario selected.
Here is a list of available fuel pathways:
Hydrogen technologies
--------------------
electrolysis
smr - natural gas
smr - natural gas with CCS
smr - biogas
smr - biogas with CCS
coal gasification
wood gasification
wood gasification with CCS
Natural gas technologies
------------------------
cng
biogas
syngas
Diesel technologies
-------------------
diesel
biodiesel - algae
biodiesel - cooking oil
synthetic diesel
Petrol technologies
-------------------
petrol
bioethanol - wheat straw
bioethanol - maize starch
bioethanol - sugarbeet
bioethanol - forest residues
synthetic gasoline
:ivar array: array from the CarModel class
:vartype array: CarModel.array
:ivar scope: dictionary that contains filters for narrowing the analysis
:ivar background_configuration: dictionary that contains choices for background system
:ivar scenario: REMIND energy scenario to use ("SSP2-Baseline": business-as-usual,
"SSP2-PkBudg1100": limits cumulative GHG emissions to 1,100 gigatons by 2100,
"static": no forward-looking modification of the background inventories).
"SSP2-Baseline" selected by default.
.. code-block:: python
"""
def __init__(
self, array, scope=None, background_configuration=None, scenario="SSP2-Base", method="recipe", method_type="midpoint"
):
if scope is None:
scope = {}
scope["size"] = array.coords["size"].values.tolist()
scope["powertrain"] = array.coords["powertrain"].values.tolist()
scope["year"] = array.coords["year"].values.tolist()
else:
scope["size"] = scope.get("size", array.coords["size"].values.tolist())
scope["powertrain"] = scope.get(
"powertrain", array.coords["powertrain"].values.tolist()
)
scope["year"] = scope.get("year", array.coords["year"].values.tolist())
self.scope = scope
self.scenario = scenario
array = array.sel(
powertrain=self.scope["powertrain"],
year=self.scope["year"],
size=self.scope["size"],
)
self.array = array.stack(desired=["size", "powertrain", "year"])
self.iterations = len(array.value.values)
self.number_of_cars = (
len(self.scope["size"])
* len(self.scope["powertrain"])
* len(self.scope["year"])
)
self.array_inputs = {
x: i for i, x in enumerate(list(self.array.parameter.values), 0)
}
self.array_powertrains = {
x: i for i, x in enumerate(list(self.array.powertrain.values), 0)
}
if not background_configuration is None:
self.background_configuration = background_configuration
else:
self.background_configuration = {}
if "energy storage" not in self.background_configuration:
self.background_configuration["energy storage"] = {
"electric": {"type": "NMC", "origin": "CN"}
}
else:
if "electric" not in self.background_configuration["energy storage"]:
self.background_configuration["energy storage"]["electric"] = {
"type": "NMC",
"origin": "CN",
}
else:
if (
"origin"
not in self.background_configuration["energy storage"]["electric"]
):
self.background_configuration["energy storage"]["electric"][
"origin"
] = "CN"
if (
"type"
not in self.background_configuration["energy storage"]["electric"]
):
self.background_configuration["energy storage"]["electric"][
"type"
] = "NMC"
self.inputs = self.get_dict_input()
self.bs = BackgroundSystemModel()
self.country = self.get_country_of_use()
self.add_additional_activities()
self.rev_inputs = self.get_rev_dict_input()
self.A = self.get_A_matrix()
self.mix = self.define_electricity_mix_for_fuel_prep()
self.fuel_blends = {}
self.define_fuel_blends()
self.set_actual_range()
self.index_cng = [self.inputs[i] for i in self.inputs if "ICEV-g" in i[0]]
self.index_combustion_wo_cng = [
self.inputs[i]
for i in self.inputs
if any(
ele in i[0]
for ele in ["ICEV-p", "HEV-p", "PHEV-p", "ICEV-d", "PHEV-d", "HEV-d"]
)
]
self.index_diesel = [self.inputs[i] for i in self.inputs if "ICEV-d" in i[0]]
self.index_all_petrol = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["ICEV-p", "HEV-p", "PHEV-p"])
]
self.index_petrol = [self.inputs[i] for i in self.inputs if "ICEV-p" in i[0]]
self.index_hybrid = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["HEV-p", "HEV-d"])
]
self.index_plugin_hybrid = [
self.inputs[i] for i in self.inputs if "PHEV" in i[0]
]
self.index_fuel_cell = [self.inputs[i] for i in self.inputs if "FCEV" in i[0]]
self.map_non_fuel_emissions = {
(
"Methane, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Methane direct emissions, suburban",
(
"Methane, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Methane direct emissions, rural",
(
"Lead",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Lead direct emissions, suburban",
(
"Ammonia",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Ammonia direct emissions, suburban",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "urban air close to ground"),
"kilogram",
): "NMVOC direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "urban air close to ground"),
"kilogram",
): "Hydrocarbons direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "low population density, long-term"),
"kilogram",
): "Dinitrogen oxide direct emissions, rural",
(
"Nitrogen oxides",
("air", "urban air close to ground"),
"kilogram",
): "Nitrogen oxides direct emissions, urban",
(
"Ammonia",
("air", "urban air close to ground"),
"kilogram",
): "Ammonia direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Particulate matters direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Carbon monoxide direct emissions, urban",
(
"Nitrogen oxides",
("air", "low population density, long-term"),
"kilogram",
): "Nitrogen oxides direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "non-urban air or from high stacks"),
"kilogram",
): "NMVOC direct emissions, suburban",
(
"Benzene",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Benzene direct emissions, suburban",
(
"Ammonia",
("air", "low population density, long-term"),
"kilogram",
): "Ammonia direct emissions, rural",
(
"Sulfur dioxide",
("air", "low population density, long-term"),
"kilogram",
): "Sulfur dioxide direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "low population density, long-term"),
"kilogram",
): "NMVOC direct emissions, rural",
(
"Particulates, < 2.5 um",
("air", "urban air close to ground"),
"kilogram",
): "Particulate matters direct emissions, urban",
(
"Sulfur dioxide",
("air", "urban air close to ground"),
"kilogram",
): "Sulfur dioxide direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Dinitrogen oxide direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Carbon monoxide direct emissions, rural",
(
"Methane, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Methane direct emissions, urban",
(
"Carbon monoxide, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Carbon monoxide direct emissions, suburban",
(
"Lead",
("air", "urban air close to ground"),
"kilogram",
): "Lead direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "low population density, long-term"),
"kilogram",
): "Particulate matters direct emissions, rural",
(
"Sulfur dioxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Sulfur dioxide direct emissions, suburban",
(
"Benzene",
("air", "low population density, long-term"),
"kilogram",
): "Benzene direct emissions, rural",
(
"Nitrogen oxides",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Nitrogen oxides direct emissions, suburban",
(
"Lead",
("air", "low population density, long-term"),
"kilogram",
): "Lead direct emissions, rural",
(
"Benzene",
("air", "urban air close to ground"),
"kilogram",
): "Benzene direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "low population density, long-term"),
"kilogram",
): "Hydrocarbons direct emissions, rural",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Hydrocarbons direct emissions, suburban",
(
"Dinitrogen monoxide",
("air", "urban air close to ground"),
"kilogram",
): "Dinitrogen oxide direct emissions, urban",
}
self.index_emissions = [
self.inputs[i] for i in self.map_non_fuel_emissions.keys()
]
self.map_noise_emissions = {
(
"noise, octave 1, day time, urban",
("octave 1", "day time", "urban"),
"joule",
): "noise, octave 1, day time, urban",
(
"noise, octave 2, day time, urban",
("octave 2", "day time", "urban"),
"joule",
): "noise, octave 2, day time, urban",
(
"noise, octave 3, day time, urban",
("octave 3", "day time", "urban"),
"joule",
): "noise, octave 3, day time, urban",
(
"noise, octave 4, day time, urban",
("octave 4", "day time", "urban"),
"joule",
): "noise, octave 4, day time, urban",
(
"noise, octave 5, day time, urban",
("octave 5", "day time", "urban"),
"joule",
): "noise, octave 5, day time, urban",
(
"noise, octave 6, day time, urban",
("octave 6", "day time", "urban"),
"joule",
): "noise, octave 6, day time, urban",
(
"noise, octave 7, day time, urban",
("octave 7", "day time", "urban"),
"joule",
): "noise, octave 7, day time, urban",
(
"noise, octave 8, day time, urban",
("octave 8", "day time", "urban"),
"joule",
): "noise, octave 8, day time, urban",
(
"noise, octave 1, day time, suburban",
("octave 1", "day time", "suburban"),
"joule",
): "noise, octave 1, day time, suburban",
(
"noise, octave 2, day time, suburban",
("octave 2", "day time", "suburban"),
"joule",
): "noise, octave 2, day time, suburban",
(
"noise, octave 3, day time, suburban",
("octave 3", "day time", "suburban"),
"joule",
): "noise, octave 3, day time, suburban",
(
"noise, octave 4, day time, suburban",
("octave 4", "day time", "suburban"),
"joule",
): "noise, octave 4, day time, suburban",
(
"noise, octave 5, day time, suburban",
("octave 5", "day time", "suburban"),
"joule",
): "noise, octave 5, day time, suburban",
(
"noise, octave 6, day time, suburban",
("octave 6", "day time", "suburban"),
"joule",
): "noise, octave 6, day time, suburban",
(
"noise, octave 7, day time, suburban",
("octave 7", "day time", "suburban"),
"joule",
): "noise, octave 7, day time, suburban",
(
"noise, octave 8, day time, suburban",
("octave 8", "day time", "suburban"),
"joule",
): "noise, octave 8, day time, suburban",
(
"noise, octave 1, day time, rural",
("octave 1", "day time", "rural"),
"joule",
): "noise, octave 1, day time, rural",
(
"noise, octave 2, day time, rural",
("octave 2", "day time", "rural"),
"joule",
): "noise, octave 2, day time, rural",
(
"noise, octave 3, day time, rural",
("octave 3", "day time", "rural"),
"joule",
): "noise, octave 3, day time, rural",
(
"noise, octave 4, day time, rural",
("octave 4", "day time", "rural"),
"joule",
): "noise, octave 4, day time, rural",
(
"noise, octave 5, day time, rural",
("octave 5", "day time", "rural"),
"joule",
): "noise, octave 5, day time, rural",
(
"noise, octave 6, day time, rural",
("octave 6", "day time", "rural"),
"joule",
): "noise, octave 6, day time, rural",
(
"noise, octave 7, day time, rural",
("octave 7", "day time", "rural"),
"joule",
): "noise, octave 7, day time, rural",
(
"noise, octave 8, day time, rural",
("octave 8", "day time", "rural"),
"joule",
): "noise, octave 8, day time, rural",
}
self.elec_map = {
"Hydro": (
"electricity production, hydro, run-of-river",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Nuclear": (
"electricity production, nuclear, pressure water reactor",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Gas": (
"electricity production, natural gas, conventional power plant",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Solar": (
"electricity production, photovoltaic, 3kWp slanted-roof installation, multi-Si, panel, mounted",
"DE",
"kilowatt hour",
"electricity, low voltage",
),
"Wind": (
"electricity production, wind, 1-3MW turbine, onshore",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Biomass": (
"heat and power co-generation, wood chips, 6667 kW, state-of-the-art 2014",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Coal": (
"electricity production, hard coal",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Oil": (
"electricity production, oil",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Geo": (
"electricity production, deep geothermal",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Waste": (
"treatment of municipal solid waste, incineration",
"DE",
"kilowatt hour",
"electricity, for reuse in municipal waste incineration only",
),
}
self.index_noise = [self.inputs[i] for i in self.map_noise_emissions.keys()]
self.list_cat, self.split_indices = self.get_split_indices()
self.method = method
if self.method == "recipe":
self.method_type = method_type
else:
self.method_type = "midpoint"
self.impact_categories = self.get_dict_impact_categories()
# Load the B matrix
self.B = self.get_B_matrix()
def __getitem__(self, key):
"""
Make class['foo'] automatically filter for the parameter 'foo'
Makes the model code much cleaner
:param key: Parameter name
:type key: str
:return: `array` filtered after the parameter selected
"""
return self.temp_array.sel(parameter=key)
def get_results_table(self, split, sensitivity=False):
"""
Format an xarray.DataArray array to receive the results.
:param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.
:return: xarrray.DataArray
"""
if split == "components":
cat = [
"direct - exhaust",
"direct - non-exhaust",
"energy chain",
"maintenance",
"glider",
"EoL",
"powertrain",
"energy storage",
"road",
]
dict_impact_cat = list(self.impact_categories.keys())
if sensitivity == False:
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
len(cat),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
cat,
np.arange(0, self.iterations),
],
dims=[
"impact_category",
"size",
"powertrain",
"year",
"impact",
"value",
],
)
else:
params = [a for a in self.array.value.values]
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
params,
],
dims=["impact_category", "size", "powertrain", "year", "parameter"],
)
return response
def get_split_indices(self):
"""
Return list of indices to split the results into categories.
:return: list of indices
:rtype: list
"""
filename = "dict_split.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError("The dictionary of splits could not be found.")
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
(_, _, *header), *data = csv_list
csv_dict = {}
for row in data:
key, sub_key, *values = row
if key in csv_dict:
if sub_key in csv_dict[key]:
csv_dict[key][sub_key].append(
{"search by": values[0], "search for": values[1]}
)
else:
csv_dict[key][sub_key] = [
{"search by": values[0], "search for": values[1]}
]
else:
csv_dict[key] = {
sub_key: [{"search by": values[0], "search for": values[1]}]
}
flatten = itertools.chain.from_iterable
d = {}
l = []
d['direct - exhaust'] = []
d['direct - exhaust'].append(
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Cadmium", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Copper", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Chromium", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Nickel", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Selenium", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Zinc", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].append(
self.inputs[("Chromium VI", ("air", "urban air close to ground"), "kilogram")]
)
d['direct - exhaust'].extend(self.index_emissions)
d['direct - exhaust'].extend(self.index_noise)
l.append(d['direct - exhaust'])
for cat in csv_dict["components"]:
d[cat] = list(
flatten(
[
self.get_index_of_flows([l["search for"]], l["search by"])
for l in csv_dict["components"][cat]
]
)
)
l.append(d[cat])
list_ind = [d[x] for x in d]
maxLen = max(map(len, list_ind))
for row in list_ind:
while len(row) < maxLen:
row.extend([len(self.inputs) - 1])
return list(d.keys()), list_ind
def calculate_impacts(
self, split="components", sensitivity=False
):
# Prepare an array to store the results
results = self.get_results_table(split, sensitivity=sensitivity)
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
# Fill in the A matrix with car parameters
self.set_inputs_in_A_matrix(self.array.values)
# Collect indices of activities contributing to the first level
arr = self.A[0, : -self.number_of_cars, -self.number_of_cars :].sum(axis=1)
ind = np.nonzero(arr)[0]
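        # Pre-allocate per-activity results: activities x impact categories x years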
new_arr = np.float32(
np.zeros((self.A.shape[1], self.B.shape[1], len(self.scope["year"])))
)
        f = np.float32(np.zeros((np.shape(self.A)[1])))  # assumed completion: one entry per activity
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv
import numpy as np
import collections
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from matplotlib import rc
rc('axes', linewidth=1.5)
rc('font', weight='bold', size=15)
def get_subtype_pool(csv_file, sub_type, pool):
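    # Fill pool with filename -> label: 1 for ADD cases, 0 for the requested non-ADD subtype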
with open(csv_file, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row[sub_type]=='1' and row['ADD']=='0':
pool[row['filename']] = 0
if row['ADD'] == '1':
pool[row['filename']] = 1
def get_score_label(csv_file, pool):
# get the raw scores and labels from the csv_file for the ROC PR curves
score, label = [], []
with open(csv_file, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['filename'] in pool:
score.append(float(row['ADD_score']))
label.append(pool[row['filename']])
return score, label
def generate_roc(gt_files, csv_files, sub_type, color, out_file):
"""
:param csv_files: a list of csv files as above format
:param positive_label: if sub_type == 'FTD', the curve is about ADD vs FTD
if sub_type == 'VD', the curve is about ADD vs VD
if sub_type == 'PDD', the curve is about ADD vs PDD
if sub_type == 'LBD', the curve is about ADD vs LBD
:param color: color of the roc curve
:param out_file: image filename you want to save as
:return:
"""
lw = 2
text_size = 20
fig, ax = plt.subplots(dpi=100)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
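    # Common FPR grid on which each model's ROC curve is interpolated before averaging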
pool = {}
for gt_file in gt_files:
get_subtype_pool(gt_file, sub_type, pool)
ADD_count, other_count = 0, 0
for key in pool:
if pool[key] == 1: ADD_count += 1
if pool[key] == 0: other_count += 1
print('ADD count {} and {} count {}'.format(ADD_count, sub_type, other_count))
for csvfile in csv_files:
scores, labels = get_score_label(csvfile, pool)
fpr, tpr, thres = roc_curve(labels, scores, pos_label=1)
AUC = auc(fpr, tpr)
ax.plot(fpr, tpr, lw=lw / 2, alpha=0.15)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(AUC)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color=color,
label=r'AUC=%0.3f$\pm$%0.3f' % (mean_auc, std_auc),
lw=2, alpha=.8)
ax.plot([0, 1], [0, 1], 'k--', lw=lw)
std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
from __future__ import print_function, division
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
from emnist import extract_training_samples
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Model
from keras.optimizers import Adam
from sklearn.utils import shuffle
class DCGAN():
def __init__(self,
n_xin=40000,
n_xout=10000,
mia_attacks=None,
use_advreg=False):
""" Builds a DCGAN model with adversarial attacks
:param n_xin, n_xout Size of training and out-of-distribution data
:param mia_attacks List with following possible values ["logan", "dist", "featuremap"]
:param use_advreg Build with advreg or without
"""
if mia_attacks is None:
mia_attacks = []
# Input shape
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
self.use_advreg = use_advreg
self.mia_attacks = mia_attacks
np.random.seed(0)
# Load the EMNIST data
(self.x_in, y_in), (self.x_out, y_out) = self.load_emnist_data(n_xin=n_xin, n_xout=n_xout)
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
self.featuremap_model, self.discriminator, self.critic_model_with_advreg, self.advreg_model = self.build_discriminator(
optimizer)
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
valid = self.discriminator(img)
# The combined model (stacked generator and discriminator)
self.combined = Model(z, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def get_xin(self):
""" Gets the members
"""
return self.x_in
def get_xout(self):
""" Gets the non-members
"""
return self.x_out
def load_emnist_data(self,
n_xin,
n_xout):
"""
Load x_in, x_out and the test set
@:param n_xin: Size of the X_in dataset
@:param n_xout: Size of the X_out dataset
@:return xin, xout, test
"""
def normalize(data):
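            # Scale pixel values to [-1, 1] to match the generator's tanh output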
return np.reshape((data.astype(np.float32) - 127.5) / 127.5, (-1, 28, 28, 1))
# Load and normalize the training data
(x_train, y_train) = extract_training_samples('digits')
x_train = normalize(x_train)
# Shuffle for some randomness
x_train, y_train = shuffle(x_train, y_train)
assert (n_xin + n_xout < len(x_train)) # No overflow, sizes have to be assured
# Split into x_in and x_out
x_in, y_in = x_train[:n_xin], y_train[:n_xin]
x_out, y_out = x_train[n_xin:n_xin + n_xout], y_train[n_xin:n_xin + n_xout]
return (x_in, y_in), (x_out, y_out)
def wasserstein_loss(self, y_true, y_pred):
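        # Wasserstein-style critic loss; also reused below as the adversarial-regularizer loss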
return -K.mean(y_true * y_pred)
def build_advreg(self, input_shape):
""" Build the model for the adversarial regularizer
"""
advreg_in = Input(input_shape)
l0 = Dense(units=500)(advreg_in)
l1 = Dropout(0.2)(l0)
l2 = Dense(units=250)(l1)
l3 = Dropout(0.2)(l2)
l4 = Dense(units=10)(l3)
advreg_out = Dense(units=1, activation="linear")(l4)
return Model(advreg_in, advreg_out)
def build_generator(self):
input_data = Input((self.latent_dim,))
l0 = Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim)(input_data)
l1 = Reshape((7, 7, 128))(l0)
l2 = UpSampling2D()(l1)
l3 = Conv2D(128, kernel_size=3, padding="same")(l2)
l4 = BatchNormalization(momentum=0.8)(l3)
l5 = Activation("relu")(l4)
l6 = UpSampling2D()(l5)
l7 = Conv2D(64, kernel_size=3, padding="same")(l6)
l8 = BatchNormalization(momentum=0.8)(l7)
l9 = Activation("relu")(l8)
l10 = Conv2D(self.channels, kernel_size=3, padding="same")(l9)
output = Activation("tanh")(l10)
return Model(input_data, output)
def build_discriminator(self, optimizer):
dropout = 0.25
img_shape = (28, 28, 1)
critic_in = Input(img_shape)
l0 = Conv2D(16, kernel_size=3, strides=2, input_shape=img_shape, padding="same")(critic_in)
l1 = LeakyReLU(alpha=0.2)(l0)
l2 = Dropout(dropout)(l1)
l3 = Conv2D(32, kernel_size=3, strides=2, padding="same")(l2)
l4 = ZeroPadding2D(padding=((0, 1), (0, 1)))(l3)
l5 = BatchNormalization(momentum=0.8)(l4)
l6 = LeakyReLU(alpha=0.2)(l5)
l7 = Dropout(dropout)(l6)
l8 = Conv2D(64, kernel_size=3, strides=2, padding="same")(l7)
l9 = BatchNormalization(momentum=0.8)(l8)
l10 = LeakyReLU(alpha=0.2)(l9)
l11 = Dropout(dropout)(l10)
l12 = Conv2D(128, kernel_size=3, strides=1, padding="same")(l11)
l13 = BatchNormalization(momentum=0.8)(l12)
l14 = LeakyReLU(alpha=0.2)(l13)
l15 = Dropout(dropout)(l14)
featuremaps = Flatten()(l15)
critic_out = Dense(1, activation="sigmoid", name="critic_out")(featuremaps)
""" Build the critic WITHOUT the adversarial regularization
"""
critic_model_without_advreg = Model(inputs=[critic_in], outputs=[critic_out])
critic_model_without_advreg.compile(optimizer=optimizer,
metrics=["accuracy"],
loss='binary_crossentropy')
""" Build the adversarial regularizer
If no adversarial regularization is required, disable it in the training function /!\
"""
featuremap_model = Model(inputs=[critic_in], outputs=[featuremaps])
advreg = self.build_advreg(input_shape=(2048,))
mia_pred = advreg(featuremap_model(critic_in))
naming_layer = Lambda(lambda x: x, name='mia_pred')
mia_pred = naming_layer(mia_pred)
advreg_model = Model(inputs=[critic_in], outputs=[mia_pred])
# Do not train the critic when updating the adversarial regularizer
featuremap_model.trainable = False
def advreg(y_true, y_pred):
return 2*K.binary_crossentropy(y_true, y_pred)
advreg_model.compile(optimizer=optimizer,
metrics=["accuracy"],
loss=self.wasserstein_loss)
""" Build the critic WITH the adversarial regularization
"""
critic_model_with_advreg = Model(inputs=[critic_in], outputs=[critic_out, mia_pred])
advreg_model.trainable = False
def critic_out(y_true, y_pred):
return K.binary_crossentropy(y_true, y_pred)
def mia_pred(y_true, y_pred):
return K.binary_crossentropy(y_true, y_pred)
critic_model_with_advreg.compile(optimizer=optimizer,
metrics=["accuracy"],
loss={
"critic_out": self.wasserstein_loss,
"mia_pred": self.wasserstein_loss
})
return featuremap_model, critic_model_without_advreg, critic_model_with_advreg, advreg_model
def train(self, epochs, batch_size=128, save_interval=50):
logan_precisions, featuremap_precisions = [], []
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half of images
idx = np.random.randint(0, len(self.x_in), batch_size)
imgs = self.x_in[idx]
idx_out = np.random.randint(0, len(self.x_out), batch_size)
imgs_out = self.x_out[idx_out]
# Sample noise and generate a batch of new images
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_imgs = self.generator.predict(noise)
if self.use_advreg:
# Train the critic to make the advreg model produce FAKE labels
d_loss_real = self.discriminator.train_on_batch(imgs, valid) # valid data
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
# Tried random, tried valid, next try skipping
self.critic_model_with_advreg.train_on_batch(imgs_out, [valid, valid])
d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
else:
d_loss_real = self.discriminator.train_on_batch(imgs, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
# ---------------------
# Train AdvReg
# Do this in the outer loop to give the discriminator a chance to adapt
# ---------------------
if self.use_advreg:
def sample_target(size):
return np.random.randint(0, 2, size)
idx_out = np.random.randint(0, len(self.x_out), batch_size)
imgs_out = self.x_out[idx_out]
idx_in = np.random.randint(0, len(self.x_in), batch_size)
imgs = self.x_in[idx_in]
                # Assumed membership labels for the regularizer: 1 = member (x_in), 0 = non-member (x_out)
                adv_x, adv_y = shuffle(np.concatenate((imgs, imgs_out)), np.concatenate((valid, fake)))
"Import all modules for the FOCUS benchmark on BenchSet"
from model import XGB_Ensemble
from utils import getmorgan, make2D
import pandas as pd
from rdkit import Chem
import numpy as np
from rdkit.Chem.SaltRemover import SaltRemover
#%%
"HELPER FUNCTIONS"
def getlabel(section):
"""
Converts all activity outcome strings in either 1, 0 or None for a list
"""
for i in range(len(section)):
if section[i] == 'Active':
section[i] = 1
elif section[i] == 'Inactive':
section[i] = 0
else:
section[i] = None
return section
def get_verdict(df, target):
"""
For a given CID, looks up on the whole dataframe all instances of that CID,
collects the labels, checks if there's any disagreements and returns a verdict
"""
#find all instances of the CID as a slice of the original df
sl = df['cid'] == target
sl = df[sl]
#convert first into list, then into numpy array of 1/0/None
acts = list(sl['activity'])
acts = np.array(getlabel(acts))
try:
        total = np.sum(acts)
import math
import numpy as np
import os
import gym
import opensim
import random
import time
def convert_to_gym(space):
    return gym.spaces.Box(np.array(space[0]), np.array(space[1]))  # assumed (low, high) bounds
#!/usr/bin/env python
from numba import njit, prange, set_num_threads
from tools.nc_reader import nc_reader
import netCDF4 as nc
import numpy as np
from glob import glob
from math import erf
import argparse
import os
# Note: check division by zero errors.
projfac = 12 # Subgrid zoom factor
r_limit_fac = 4.0 # How far out we project the parcels, expressed in ellipsoid radii.
parser = argparse.ArgumentParser()
parser.add_argument("input_file_name", type=str)
parser.add_argument("a_fac", type=float)
parser.add_argument("b_fac", type=float)
parser.add_argument('-f','--fields',
nargs='+',
type=str,
help='List of fields to refine.',
required=True)
parser.add_argument('--nthreads',
type=int,
help='Number of threads.',
default=1)
parser.add_argument('-s','--steps',
nargs='+',
type=int,
help='List of steps to refine.',
required=True)
args = parser.parse_args()
input_file_name = args.input_file_name
a_fac = args.a_fac
b_fac = args.b_fac
fields = args.fields
steps = args.steps
nthreads = args.nthreads
set_num_threads(nthreads)
file_root, file_ext = os.path.splitext(input_file_name)
if not (file_ext == ".nc"):
raise argparse.ArgumentTypeError('Argument filename must end on ".nc"')
ncreader = nc_reader()
ncreader.open(input_file_name)
# set up spatial arrays
extent = ncreader.get_box_extent()
origin = ncreader.get_box_origin()
ncells = ncreader.get_box_ncells()
# initialisation
nx = ncells[0]
nz = ncells[1] # Don't add 1 (this is non-periodic) here
# projection grid
nxproj = nx * projfac
nzproj = nz * projfac + 1 # But do add 1 here
dx = extent[0] / nx
dz = extent[1] / nz
dx_project = dx / projfac
dz_project = dz / projfac
dxi_project = projfac / dx # Inverse of projection grid distance
dzi_project = projfac / dz
# Projection grid coordinates
xp = np.linspace(origin[0], origin[0] + extent[0], nxproj + 1)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class RedRio:
def __init__(self,codigo = None,**kwargs):
self.info = pd.Series()
self.codigo = codigo
self.info.slug = None
self.fecha = '2006-06-06 06:06'
self.workspace = '/media/'
self.seccion = pd.DataFrame(columns = [u'vertical', u'x', u'y', u'v01', u'v02', u'v03', u'v04',
u'v05', u'v06', u'v07', u'v08', u'v09', u'vsup'])
self.parametros = "id_aforo,fecha,ancho_superficial,caudal_medio,velocidad_media,perimetro,area_total,profundidad_media,radio_hidraulico"
self.aforo = pd.Series(index = [u'fecha', u'ancho_superficial', u'caudal_medio',
u'velocidad_media',u'perimetro', u'area_total',
u'profundidad_media', u'radio_hidraulico',u'levantamiento'])
self.levantamiento = pd.DataFrame(columns = ['vertical','x','y'])
self.alturas = pd.DataFrame(index=pd.date_range(start = pd.to_datetime('2018').strftime('%Y-%m-%d 06:00'),periods=13,freq='H'),columns = ['profundidad','offset','lamina','caudal'])
self.alturas.index = map(lambda x:x.strftime('%H:00'),self.alturas.index)
@property
def caudales(self):
pass
@property
def folder_path(self):
return self.workspace+pd.to_datetime(self.fecha).strftime('%Y%m%d')+'/'+self.info.slug+'/'
def insert_vel(self,vertical,v02,v04,v08):
self.seccion.loc[vertical,'v02'] = v02
self.seccion.loc[vertical,'v04'] = v04
self.seccion.loc[vertical,'v08'] = v08
def velocidad_media_dovela(self):
columns = [u'vertical', u'x', u'y', u'v01', u'v02', u'v03',
u'v04', u'v05', u'v06', u'v07', u'v08', u'v09', u'vsup']
dfs = self.seccion[columns].copy()
self.seccion['vm'] = np.NaN
vm = []
for index in dfs.index:
vm.append(round(self.estima_velocidad_media_vertical(dfs.loc[index].dropna()),3))
self.seccion['vm'] = vm
def area_dovela(self):
self.seccion['area'] = self.get_area(self.seccion['x'].abs().values,self.seccion['y'].abs().values)
def estima_velocidad_media_vertical(self,vertical,factor=0.0,v_index=0.8):
vertical = vertical[vertical.index!='vm']
index = list(vertical.index)
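        # Choose the averaging formula according to which point velocities were actually measured on this vertical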
if index == ['vertical','x','y']:
if vertical['x'] == 0.0:
vm = factor * self.seccion.loc[vertical.name+1,'vm']
else:
vm = factor * self.seccion.loc[vertical.name-1,'vm']
elif (index == ['vertical','x','y','vsup']) or (index == ['vertical','x','y','v08']):
try:
vm = v_index*vertical['vsup']
except:
vm = v_index*vertical['v08']
elif (index == ['vertical','x','y','v04']) or (index == ['vertical','x','y','v04','vsup']):
vm = vertical['v04']
elif (index == ['vertical','x','y','v04','v08']) or (index == ['vertical','x','y','v04','v08','vsup']) or (index == ['vertical','x','y','v02','v04']):
vm = vertical['v04']
elif index == ['vertical','x','y','v08','vsup']:
vm = v_index*vertical['vsup']
elif (index == ['vertical','x','y','v02','v04','v08']) or (index == ['vertical','x','y','v02','v04','v08','vsup']):
vm = (2*vertical['v04']+vertical['v08']+vertical['v02'])/4.0
elif (index == ['vertical','x','y','v02','v08']):
vm = (vertical['v02']+vertical['v08'])/2.0
return vm
def perimetro(self):
x,y = (self.seccion['x'].values,self.seccion['y'].values)
def perimeter(x,y):
p = []
for i in range(len(x)-1):
p.append(round(float(np.sqrt(abs(x[i]-x[i+1])**2.0+abs(y[i]-y[i+1])**2.0)),3))
return [0]+p
self.seccion['perimetro'] = perimeter(self.seccion['x'].values,self.seccion['y'].values)
def get_area(self,x,y):
        '''Computes the area of each subsection (dovela) of the cross-section
        using the mid-section method.
        Input:
        x = distance from the left bank, type = numpy array
        y = depth
        Output:
        area = area of each subsection (discharge is obtained later as vm * area)
        '''
        # area computation (mid-section method)
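        # d: half-distances between verticals; b: mid-section boundaries delimiting each dovela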
d = np.absolute(np.diff(x))/2.
b = x[:-1]+d
area = np.diff(b)*y[1:-1]
area = np.insert(area, 0, d[0]*y[0])
area = np.append(area,d[-1]*y[-1])
area = np.absolute(area)
        # discharge per subsection is computed later as vm * area; only areas are returned here
return np.round(area,3)
def read_excel_format(self,file):
df = pd.read_excel(file)
df = df.loc[df['x'].dropna().index]
df['vertical'] = range(1,df.index.size+1)
df['y'] = df['y'].abs()*-1
df.columns = map(lambda x:x.lower(),df.columns)
self.seccion = df[self.seccion.columns]
df = pd.read_excel(file,sheetname=1)
self.aforo.fecha = df.iloc[1].values[1].strftime('%Y-%m-%d')+df.iloc[2].values[1].strftime(' %H:%M')
self.aforo['x_sensor'] = df.iloc[4].values[1]
self.aforo['lamina'] = df.iloc[5].values[1]
df = pd.read_excel(file,sheetname=2)
self.levantamiento = df[df.columns[1:]]
self.levantamiento.columns = ['x','y']
self.levantamiento.index.name = 'vertical'
self.aforo.levantamiento = True
def plot_bars(self,s,filepath=None,bar_fontsize=14,decimales=2,xfactor =1.005,yfactor=1.01,ax=None):
if ax is None:
plt.figure(figsize=(20,6))
s.plot(kind='bar',ax=ax)
ax.set_ylim(s.min()*0.01,s.max()*1.01)
for container in ax.containers:
plt.setp(container, width=0.8)
for p in ax.patches:
ax.annotate(str(round(p.get_height(),decimales)),
(p.get_x() * xfactor, p.get_height() * yfactor),
fontsize = bar_fontsize)
for j in ['top','right']:
ax.spines[j].set_edgecolor('white')
ax.set_ylabel(r'$Caudal\ [m^3/s]$')
if filepath:
plt.savefig(filepath,bbox_inches='tight')
def plot_levantamientos(self):
for id_aforo in self.levantamientos:
self.plot_section(self.get_levantamiento(id_aforo),x_sensor=2,level=0.0)
plt.title("%s : %s,%s"%(self.info.slug,self.codigo,id_aforo))
def procesa_aforo(self):
self.velocidad_media_dovela()
self.area_dovela()
self.seccion['caudal'] = np.round(np.array(self.seccion.vm*self.seccion.area),3)
self.perimetro()
self.aforo.caudal_medio = round(self.seccion.caudal.sum(),3)
self.aforo.area_total = round(self.seccion.area.sum(),3)
self.aforo.velocidad_media = round(self.aforo.caudal_medio/self.aforo.area_total,3)
self.aforo.ancho_superficial = self.seccion['x'].abs().max()-self.seccion['x'].abs().min()
self.aforo.perimetro = round(self.seccion.perimetro.sum(),3)
self.aforo.profundidad_media = round(self.seccion['y'].abs()[self.seccion['y'].abs()>0.0].mean(),3)
self.aforo.radio_hidraulico = round(self.aforo.area_total/self.aforo.perimetro,3)
self.fecha = self.aforo.fecha
def ajusta_levantamiento(self):
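        # Locate the bed elevation at the sensor's x-position, insert that point into the surveyed profile,
        # and shift depths so that point becomes the datum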
cond = (self.levantamiento['x']<self.aforo.x_sensor).values
flag = cond[0]
for i,j in enumerate(cond):
if j==flag:
pass
else:
point = ((self.levantamiento.iloc[i-1].x,self.levantamiento.iloc[i-1].y),(self.levantamiento.iloc[i].x,self.levantamiento.iloc[i].y))
flag = j
point2 = ((self.aforo.x_sensor,0.1*self.levantamiento['y'].min()),((self.aforo.x_sensor,1.1*self.levantamiento['y'].max())))
intersection = self.line_intersection(point,point2)
self.levantamiento = self.levantamiento.append(pd.DataFrame(np.matrix(intersection),index=['self.aforo.x_sensor'],columns=['x','y'])).sort_values('x')
self.levantamiento['y'] = self.levantamiento['y']-intersection[1]
self.levantamiento['vertical'] = range(1,self.levantamiento.index.size+1)
self.levantamiento.index = range(0,self.levantamiento.index.size)
def line_intersection(self,line1, line2):
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return (x, y)
def get_sections(self,levantamiento,level):
hline = ((levantamiento['x'].min()*1.1,level),(levantamiento['x'].max()*1.1,level)) # horizontal line
lev = pd.DataFrame.copy(levantamiento) #df to modify
        # Edge problems at the banks of the surveyed cross-section
        borderWarning = 'Warning:\nEdge problems in the surveyed cross-section (levantamiento)'
if lev.iloc[0]['y']<level:
            # Assumed completion: prepend a point at the water level so the left bank is closed
            lev = pd.DataFrame(np.matrix([lev.iloc[0]['x'], level]), columns=['x', 'y']).append(lev)
import numpy as np
from scipy.stats import loguniform
class example_prior:
"""
This example class defines an example prior class to handle both evaluations and
sampling of the prior
"""
def sample(self, nsamples = None):
"""
Function that sample points from the prior
"""
if nsamples is None:
nsamples = self.nsamples
# Evaluate samples:
mu_samples = np.random.uniform(self.mu1, self.mu2, nsamples)
sigma_samples = loguniform.rvs(self.sigma1, self.sigma2, size = nsamples)
# Return them:
return mu_samples, sigma_samples
def validate(self, theta):
"""
This function validates that the set of parameters to evaluate
are within the ranges of the prior
"""
# Extract current parameters to evaluate the priors on:
mu, sigma = theta
# Validate the uniform prior:
if mu <= self.mu1 or mu >= self.mu2:
return False
# Validate the loguniform prior:
if sigma <= self.sigma1 or sigma >= self.sigma2:
return False
# If all is good, return a nice True:
return True
def evaluate(self, theta):
"""
Given an input vector, evaluate the prior. In this case, this just returns the
priors defined by the hyperparameters. For the uniform prior, the value of the
        prior doesn't depend on the inputs. For the loguniform, that's not the case.
"""
# Extract current parameters to evaluate the priors on:
mu, sigma = theta
# Return the prior evaluated on theta:
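        # p(mu) = 1/(mu2 - mu1); p(sigma) = 1/(sigma * ln(sigma2/sigma1))  (log-uniform density)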
return self.mu_prior * (self.sigma_factor / sigma)
def __init__(self, mu1 = -100, mu2 = 100, sigma1 = 0.1, sigma2 = 100., nsamples = 100):
# Define hyperparameters of the prior. First for mu (uniform prior):
self.mu1 = mu1
self.mu2 = mu2
# Value of the prior given hyperparameters:
self.mu_prior = 1. / (mu2 - mu1)
# Same for sigma (log-uniform):
self.sigma1 = sigma1
self.sigma2 = sigma2
# Compute factor that will multiply 1/x where x is the input to evaluate the
# prior:
la = np.log(sigma1)
lb = np.log(sigma2)
self.sigma_factor = 1./(lb - la)
# Define the default number of samples:
self.nsamples = nsamples
def gen_fake_data(length = 1000):
mu, sigma = 50., 10.
simulated_data = np.random.normal(mu, sigma, length)
fout = open('basic_data.dat', 'w')
for i in range(len(simulated_data)):
fout.write(str(simulated_data[i])+'\n')
fout.close()
class example_simulator:
"""
This example class generates a simulator object that is able to simulate several or
single simulations
"""
def single_simulation(self, parameters):
# Extract parameters:
mu, sigma = parameters
return np.random.normal(mu, sigma, self.length)
def several_simulations(self, parameters):
# Extract parameters:
mus, sigmas = parameters
nsamples = len(mus)
# Define array to store simulations:
simulations = np.zeros([nsamples, self.length])
# Lazy loop to do several of these; could apply multi-processing:
for i in range(nsamples):
simulations[i,:] = self.single_simulation([mus[i], sigmas[i]])
return simulations
def __init__(self, length = 1000):
self.length = length
class example_distance:
"""
Example class for distance.
"""
def single_distance(self, simulation):
""" Given a dataset and a simulation, this function returns the distance
between them. This is defined here as the sum of the absolute deviation between
the data and a given simulation """
sim_mean = np.mean(simulation)
sim_var = np.var(simulation)
return np.abs( (sim_mean - self.data_mean) / self.data_mean) + \
np.abs( (sim_var - self.data_variance) / self.data_variance )
def several_distances(self, simulations):
""" Same as single distance, several times """
nsimulations = simulations.shape[0]
distances = np.zeros(nsimulations)
for i in range(nsimulations):
distances[i] = self.single_distance(simulations[i,:])
return distances
def __init__(self, data, length = 1000):
self.data = data
        self.data_mean = np.mean(data)
        # Completed from usage in single_distance above, which also needs the data variance
        self.data_variance = np.var(data)