| prompt (string, 19–879k chars) | completion (string, 3–53.8k chars) | api (string, 8–59 chars) |
---|---|---|
import math, sys, os, time
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import bayes_mvs as bayesest
sys.path.insert(0, '../../PyEcoLib')
from simulator import Simulator
mean_size = 3
doubling_time = 18
tmax = 180
sample_time = 2
div_steps = 10
ncells = 1000
gr = np.log(2)/doubling_time
kd = div_steps * gr/mean_size
sampling_time = sample_time
rprom = 10
pprom = 1000
gammar = 5 * gr
kr = rprom*(gr+gammar)
kp = pprom*gr/rprom
pop = np.zeros([ncells, 6])
indexes = int(tmax/sampling_time)
rarray = np.zeros([ncells, indexes])
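# --- Added sanity check (illustrative, not part of the original script) -------
# The rate constants above encode simple steady-state targets: mean mRNA per
# cell should settle near kr/(gr + gammar) = rprom and mean protein near
# kp * rprom / gr = pprom. A quick check of those relations:
assert abs(kr / (gr + gammar) - rprom) < 1e-9
assert abs(kp * rprom / gr - pprom) < 1e-9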
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 22:16, 21/04/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from numpy import dot, ones, array, ceil, sqrt, exp
from opfunu.cec.cec2014.utils import *
SUPPORT_DIMENSION = [2, 10, 20, 30, 50, 100]
SUPPORT_DIMENSION_2 = [10, 20, 30, 50, 100]
def F1(solution=None, name="Rotated High Conditioned Elliptic Function", shift_data_file="shift_data_1.txt", bias=100):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_1_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = dot(solution - shift_data, matrix)
return f1_elliptic__(z) + bias
def F2(solution=None, name="Rotated Bent Cigar Function", shift_data_file="shift_data_2.txt", bias=200):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_2_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = dot(solution - shift_data, matrix)
return f2_bent_cigar__(z) + bias
def F3(solution=None, name="Rotated Discus Function", shift_data_file="shift_data_3.txt", bias=300):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_3_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = dot(solution - shift_data, matrix)
return f3_discus__(z) + bias
def F4(solution=None, name="Shifted and Rotated Rosenbrock’s Function", shift_data_file="shift_data_4.txt", bias=400):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_4_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 2.048 * (solution - shift_data) / 100
z = dot(z, matrix) + 1
return f4_rosenbrock__(z) + bias
def F5(solution=None, name="Shifted and Rotated Ackley’s Function", shift_data_file="shift_data_5.txt", bias=500):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_5_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = dot(solution - shift_data, matrix)
return f5_ackley__(z) + bias
def F6(solution=None, name="Shifted and Rotated Weierstrass Function", shift_data_file="shift_data_6.txt", bias=600):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_6_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 0.5 * (solution - shift_data) / 100
z = dot(z, matrix)
return f6_weierstrass__(z) + bias
def F7(solution=None, name="Shifted and Rotated Griewank’s Function", shift_data_file="shift_data_7.txt", bias=700):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_7_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 600 * (solution - shift_data) / 100
z = dot(z, matrix)
return f7_griewank__(z) + bias
def F8(solution=None, name="Shifted Rastrigin’s Function", shift_data_file="shift_data_8.txt", bias=800):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_8_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 5.12 * (solution - shift_data) / 100
z = dot(z, matrix)
return f8_rastrigin__(z) + bias
def F9(solution=None, name="Shifted and Rotated Rastrigin’s Function", shift_data_file="shift_data_9.txt", bias=900):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_9_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 5.12 * (solution - shift_data) / 100
z = dot(z, matrix)
return f8_rastrigin__(z) + bias
def F10(solution=None, name="Shifted Schwefel’s Function", shift_data_file="shift_data_10.txt", bias=1000):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_10_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 1000 * (solution - shift_data) / 100
z = dot(z, matrix)
return f9_modified_schwefel__(z) + bias
def F11(solution=None, name="Shifted and Rotated Schwefel’s Function", shift_data_file="shift_data_11.txt", bias=1100):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_11_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 1000 * (solution - shift_data) / 100
z = dot(z, matrix)
return f9_modified_schwefel__(z) + bias
def F12(solution=None, name="Shifted and Rotated Katsuura Function", shift_data_file="shift_data_12.txt", bias=1200):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_12_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 5 * (solution - shift_data) / 100
z = dot(z, matrix)
return f10_katsuura__(z) + bias
def F13(solution=None, name="Shifted and Rotated HappyCat Function", shift_data_file="shift_data_13.txt", bias=1300):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_13_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 5 * (solution - shift_data) / 100
z = dot(z, matrix)
return f11_happy_cat__(z) + bias
def F14(solution=None, name="Shifted and Rotated HGBat Function", shift_data_file="shift_data_14.txt", bias=1400):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_14_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 5 * (solution - shift_data) / 100
z = dot(z, matrix)
return f12_hgbat__(z) + bias
def F15(solution=None, name="Shifted and Rotated Expanded Griewank’s plus Rosenbrock’s Function", shift_data_file="shift_data_15.txt", bias=1500):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_15_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = 5 * (solution - shift_data) / 100
z = dot(z, matrix) + 1
return f13_expanded_griewank__(z) + bias
def F16(solution=None, name="Shifted and Rotated Expanded Scaffer’s F6 Function", shift_data_file="shift_data_16.txt", bias=1600):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION:
f_matrix = "M_16_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 2, 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
z = dot(solution - shift_data, matrix) + 1
return f14_expanded_scaffer__(z) + bias
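# --- Added usage sketch (illustrative, assumes the opfunu data files exist) ---
# Each of F1..F16 maps a 1-D numpy array to a scalar fitness whose global
# optimum value equals its bias (100 for F1, 200 for F2, ...), provided the
# shift/rotation text files referenced above are available on disk:
#
#   import numpy as np
#   x = np.random.uniform(-100, 100, 30)   # a 30-dimensional candidate
#   print(F1(x), F4(x), F16(x))            # each returns fitness + bias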
### ================== Hybrid function ========================
def F17(solution=None, name="Hybrid Function 1", shift_data_file="shift_data_17.txt", bias=1700, shuffle=None):
problem_size = len(solution)
p = array([0.3, 0.3, 0.4])
n1 = int(ceil(p[0] * problem_size))
n2 = int(ceil(p[1] * problem_size))
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = "M_17_D" + str(problem_size) + ".txt"
if shuffle is None:
f_shuffle = "shuffle_data_17_D" + str(problem_size) + ".txt"
else:
f_shuffle = "shuffle_data_" + str(shuffle) + "_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
shuffle = (load_shift_data__(f_shuffle)[:problem_size] - ones(problem_size)).astype(int)
idx1 = shuffle[:n1]
idx2 = shuffle[n1:(n1+n2)]
idx3 = shuffle[(n1+n2):]
mz = dot(solution - shift_data, matrix)
return f9_modified_schwefel__(mz[idx1]) + f8_rastrigin__(mz[idx2]) + f1_elliptic__(mz[idx3]) + bias
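# --- Added worked example (illustrative) --------------------------------------
# A hybrid function splits the rotated vector mz into chunks whose sizes come
# from p. With problem_size = 10 and p = [0.3, 0.3, 0.4]: n1 = ceil(3.0) = 3,
# n2 = ceil(3.0) = 3, and the last chunk takes the remaining 4 dimensions, so
# the three component functions above see 3, 3 and 4 coordinates respectively.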
def F18(solution=None, name="Hybrid Function 2", shift_data_file="shift_data_18.txt", bias=1800, shuffle=None):
problem_size = len(solution)
p = array([0.3, 0.3, 0.4])
n1 = int(ceil(p[0] * problem_size))
n2 = int(ceil(p[1] * problem_size))
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = "M_18_D" + str(problem_size) + ".txt"
if shuffle is None:
f_shuffle = "shuffle_data_18_D" + str(problem_size) + ".txt"
else:
f_shuffle = "shuffle_data_" + str(shuffle) + "_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
shuffle = (load_shift_data__(f_shuffle)[:problem_size] - ones(problem_size)).astype(int)
idx1 = shuffle[:n1]
idx2 = shuffle[n1:(n1 + n2)]
idx3 = shuffle[(n1 + n2):]
mz = dot(solution - shift_data, matrix)
return f2_bent_cigar__(mz[idx1]) + f12_hgbat__(mz[idx2]) + f8_rastrigin__(mz[idx3]) + bias
def F19(solution=None, name="Hybrid Function 3", shift_data_file="shift_data_19.txt", bias=1900, shuffle=None):
problem_size = len(solution)
p = array([0.2, 0.2, 0.3, 0.3])
n1 = int(ceil(p[0] * problem_size))
n2 = int(ceil(p[1] * problem_size))
n3 = int(ceil(p[2] * problem_size))
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = "M_19_D" + str(problem_size) + ".txt"
if shuffle is None:
f_shuffle = "shuffle_data_19_D" + str(problem_size) + ".txt"
else:
f_shuffle = "shuffle_data_" + str(shuffle) + "_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
shuffle = (load_shift_data__(f_shuffle)[:problem_size] - ones(problem_size)).astype(int)
idx1 = shuffle[:n1]
idx2 = shuffle[n1:(n1 + n2)]
idx3 = shuffle[(n1 + n2):(n1+n2+n3)]
idx4 = shuffle[n1+n2+n3:]
mz = dot(solution - shift_data, matrix)
return f7_griewank__(mz[idx1]) + f6_weierstrass__(mz[idx2]) + f4_rosenbrock__(mz[idx3]) + f14_expanded_scaffer__(mz[idx4])+ bias
def F20(solution=None, name="Hybrid Function 4", shift_data_file="shift_data_20.txt", bias=2000, shuffle=None):
problem_size = len(solution)
p = array([0.2, 0.2, 0.3, 0.3])
n1 = int(ceil(p[0] * problem_size))
n2 = int(ceil(p[1] * problem_size))
n3 = int(ceil(p[2] * problem_size))
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = "M_20_D" + str(problem_size) + ".txt"
if shuffle is None:
f_shuffle = "shuffle_data_20_D" + str(problem_size) + ".txt"
else:
f_shuffle = "shuffle_data_" + str(shuffle) + "_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
shuffle = (load_shift_data__(f_shuffle)[:problem_size] - ones(problem_size)).astype(int)
idx1 = shuffle[:n1]
idx2 = shuffle[n1:(n1 + n2)]
idx3 = shuffle[(n1 + n2):(n1 + n2 + n3)]
idx4 = shuffle[n1 + n2 + n3:]
mz = dot(solution - shift_data, matrix)
return f12_hgbat__(mz[idx1]) + f3_discus__(mz[idx2]) + f13_expanded_griewank__(mz[idx3]) + f8_rastrigin__(mz[idx4]) + bias
def F21(solution=None, name="Hybrid Function 5", shift_data_file="shift_data_21.txt", bias=2100, shuffle=None):
problem_size = len(solution)
p = array([0.1, 0.2, 0.2, 0.2, 0.3])
n1 = int(ceil(p[0] * problem_size))
n2 = int(ceil(p[1] * problem_size))
n3 = int(ceil(p[2] * problem_size))
n4 = int(ceil(p[3] * problem_size))
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = "M_21_D" + str(problem_size) + ".txt"
if shuffle is None:
f_shuffle = "shuffle_data_21_D" + str(problem_size) + ".txt"
else:
f_shuffle = "shuffle_data_" + str(shuffle) + "_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
shuffle = (load_shift_data__(f_shuffle)[:problem_size] - ones(problem_size)).astype(int)
idx1 = shuffle[:n1]
idx2 = shuffle[n1:(n1 + n2)]
idx3 = shuffle[(n1 + n2):(n1 + n2 + n3)]
idx4 = shuffle[(n1+n2+n3):(n1+n2+n3+n4)]
idx5 = shuffle[n1+n2+n3+n4:]
mz = dot(solution - shift_data, matrix)
return f14_expanded_scaffer__(mz[idx1]) + f12_hgbat__(mz[idx2]) + f4_rosenbrock__(mz[idx3]) + \
f9_modified_schwefel__(mz[idx4]) + f1_elliptic__(mz[idx5]) + bias
def F22(solution=None, name="Hybrid Function 6", shift_data_file="shift_data_22.txt", bias=2200, shuffle=None):
problem_size = len(solution)
p = array([0.1, 0.2, 0.2, 0.2, 0.3])
n1 = int(ceil(p[0] * problem_size))
n2 = int(ceil(p[1] * problem_size))
n3 = int(ceil(p[2] * problem_size))
n4 = int(ceil(p[3] * problem_size))
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = "M_22_D" + str(problem_size) + ".txt"
if shuffle is None:
f_shuffle = "shuffle_data_21_D" + str(problem_size) + ".txt"
else:
f_shuffle = "shuffle_data_" + str(shuffle) + "_D" + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_shift_data__(shift_data_file)[:problem_size]
matrix = load_matrix_data__(f_matrix)
shuffle = (load_shift_data__(f_shuffle)[:problem_size] - ones(problem_size)).astype(int)
idx1 = shuffle[:n1]
idx2 = shuffle[n1:(n1 + n2)]
idx3 = shuffle[(n1 + n2):(n1 + n2 + n3)]
idx4 = shuffle[(n1 + n2 + n3):(n1 + n2 + n3 + n4)]
idx5 = shuffle[n1 + n2 + n3 + n4:]
mz = dot(solution - shift_data, matrix)
return f10_katsuura__(mz[idx1]) + f11_happy_cat__(mz[idx2]) + f13_expanded_griewank__(mz[idx3]) + \
f9_modified_schwefel__(mz[idx4]) + f5_ackley__(mz[idx5]) + bias
### ================== Composition function ========================
def F23(solution=None, name="Composition Function 1", f_shift_file="shift_data_23.txt", f_matrix="M_23_D", f_bias=2300):
problem_size = len(solution)
xichma = array([10, 20, 30, 40, 50])
lamda = array([1, 1e-6, 1e-26, 1e-6, 1e-6])
bias = array([0, 100, 200, 300, 400])
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = f_matrix + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_matrix_data__(f_shift_file)[:problem_size]
shift_data = shift_data[:, :problem_size]
matrix = load_matrix_data__(f_matrix)
# 1. Rotated Rosenbrock’s Function F4’
t1 = solution - shift_data[0]
g1 = lamda[0] * f4_rosenbrock__(dot(t1, matrix[:problem_size, :])) + bias[0]
w1 = (1.0 / sqrt(sum(t1 ** 2))) * exp(-sum(t1 ** 2) / (2 * problem_size * xichma[0] ** 2))
# 2. High Conditioned Elliptic Function F1’
t2 = solution - shift_data[1]
g2 = lamda[1] * f1_elliptic__(solution) + bias[1]
w2 = (1.0 / sqrt(sum(t2 ** 2))) * exp(-sum(t2 ** 2) / (2 * problem_size * xichma[1] ** 2))
# 3. Rotated Bent Cigar Function F2’
t3 = solution - shift_data[2]
g3 = lamda[2] * f2_bent_cigar__(dot(matrix[2 * problem_size: 3 * problem_size, :], t3)) + bias[2]
w3 = (1.0 / sqrt(sum(t3 ** 2))) * exp(-sum(t3 ** 2) / (2 * problem_size * xichma[2] ** 2))
# 4. Rotated Discus Function F3’
t4 = solution - shift_data[3]
g4 = lamda[3] * f3_discus__(dot(matrix[3 * problem_size: 4 * problem_size, :], t4)) + bias[3]
w4 = (1.0 / sqrt(sum(t4 ** 2))) * exp(-sum(t4 ** 2) / (2 * problem_size * xichma[3] ** 2))
# 5. High Conditioned Elliptic Function F1’
t5 = solution - shift_data[4]
g5 = lamda[4] * f1_elliptic__(solution) + bias[4]
w5 = (1.0 / sqrt(sum(t5 ** 2))) * exp(-sum(t5 ** 2) / (2 * problem_size * xichma[4] ** 2))
sw = sum([w1, w2, w3, w4, w5])
result = (w1 * g1 + w2 * g2 + w3 * g3 + w4 * g4 + w5 * g5) / sw
return result + f_bias
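# --- Added note (illustrative) -------------------------------------------------
# F23 above, and the composition functions that follow, blend their components
# with distance-based weights computed exactly as in the code above:
#   w_i = 1/sqrt(sum((x - o_i)^2)) * exp(-sum((x - o_i)^2) / (2*D*sigma_i^2))
# and return sum_i (w_i / sum_j w_j) * g_i + f_bias, so the component whose
# shift vector o_i lies closest to x dominates the mixture.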
def F24(solution=None, name="Composition Function 2", f_shift_file="shift_data_24.txt", f_matrix="M_24_D", f_bias=2400):
problem_size = len(solution)
xichma = array([20, 20, 20])
lamda = array([1, 1, 1])
bias = array([0, 100, 200])
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = f_matrix + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_matrix_data__(f_shift_file)[:problem_size]
shift_data = shift_data[:, :problem_size]
matrix = load_matrix_data__(f_matrix)
# 1. Schwefel’s Function F10’
t1 = solution - shift_data[0]
g1 = lamda[0] * f9_modified_schwefel__(solution) + bias[0]
w1 = (1.0 / sqrt(sum(t1 ** 2))) * exp(-sum(t1 ** 2) / (2 * problem_size * xichma[0] ** 2))
# 2. Rotated Rastrigin’s Function F9’
t2 = solution - shift_data[1]
g2 = lamda[1] * f8_rastrigin__(dot(matrix[problem_size: 2 * problem_size], t2)) + bias[1]
w2 = (1.0 / sqrt(sum(t2 ** 2))) * exp(-sum(t2 ** 2) / (2 * problem_size * xichma[1] ** 2))
# 3. Rotated HGBat Function F14’
t3 = solution - shift_data[2]
g3 = lamda[2] * f12_hgbat__(dot(matrix[2 * problem_size: 3 * problem_size, :], t3)) + bias[2]
w3 = (1.0 / sqrt(sum(t3 ** 2))) * exp(-sum(t3 ** 2) / (2 * problem_size * xichma[2] ** 2))
sw = sum([w1, w2, w3])
result = (w1 * g1 + w2 * g2 + w3 * g3) / sw
return result + f_bias
def F25(solution=None, name="Composition Function 3", f_shift_file="shift_data_25.txt", f_matrix="M_25_D", f_bias=2500):
problem_size = len(solution)
xichma = array([10, 30, 50])
lamda = array([0.25, 1, 1e-7])
bias = array([0, 100, 200])
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = f_matrix + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_matrix_data__(f_shift_file)[:problem_size]
shift_data = shift_data[:, :problem_size]
matrix = load_matrix_data__(f_matrix)
# 1. Rotated Schwefel's Function F11’
t1 = solution - shift_data[0]
g1 = lamda[0] * f9_modified_schwefel__(dot(matrix[:problem_size, :problem_size], t1)) + bias[0]
w1 = (1.0 / sqrt(sum(t1 ** 2))) * exp(-sum(t1 ** 2) / (2 * problem_size * xichma[0] ** 2))
# 2. Rotated Rastrigin’s Function F9’
t2 = solution - shift_data[1]
g2 = lamda[1] * f8_rastrigin__(dot(matrix[problem_size: 2 * problem_size], t2)) + bias[1]
w2 = (1.0 / sqrt(sum(t2 ** 2))) * exp(-sum(t2 ** 2) / (2 * problem_size * xichma[1] ** 2))
# 3. Rotated High Conditioned Elliptic Function F1'
t3 = solution - shift_data[2]
g3 = lamda[2] * f1_elliptic__(dot(matrix[2 * problem_size: 3 * problem_size, :], t3)) + bias[2]
w3 = (1.0 / sqrt(sum(t3 ** 2))) * exp(-sum(t3 ** 2) / (2 * problem_size * xichma[2] ** 2))
sw = sum([w1, w2, w3])
result = (w1 * g1 + w2 * g2 + w3 * g3) / sw
return result + f_bias
def F26(solution=None, name="Composition Function 4", f_shift_file="shift_data_26.txt", f_matrix="M_26_D", f_bias=2600):
problem_size = len(solution)
xichma = array([10, 10, 10, 10, 10])
lamda = array([0.25, 1, 1e-7, 2.5, 10])
bias = array([0, 100, 200, 300, 400])
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = f_matrix + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_matrix_data__(f_shift_file)[:problem_size]
shift_data = shift_data[:, :problem_size]
matrix = load_matrix_data__(f_matrix)
# 1. Rotated Schwefel's Function F11’
t1 = solution - shift_data[0]
g1 = lamda[0] * f9_modified_schwefel__(dot(matrix[:problem_size, :], t1)) + bias[0]
w1 = (1.0 / sqrt(sum(t1 ** 2))) * exp(-sum(t1 ** 2) / (2 * problem_size * xichma[0] ** 2))
# 2. Rotated HappyCat Function F13’
t2 = solution - shift_data[1]
g2 = lamda[1] * f11_happy_cat__(dot(matrix[problem_size:2 * problem_size, :], t2)) + bias[1]
w2 = (1.0 / sqrt(sum(t2 ** 2))) * exp(-sum(t2 ** 2) / (2 * problem_size * xichma[1] ** 2))
# 3. Rotated High Conditioned Elliptic Function F1’
t3 = solution - shift_data[2]
g3 = lamda[2] * f1_elliptic__(dot(matrix[2 * problem_size: 3 * problem_size, :], t3)) + bias[2]
w3 = (1.0 / sqrt(sum(t3 ** 2))) * exp(-sum(t3 ** 2) / (2 * problem_size * xichma[2] ** 2))
# 4. Rotated Weierstrass Function F6’
t4 = solution - shift_data[3]
g4 = lamda[3] * f6_weierstrass__(dot(matrix[3 * problem_size: 4 * problem_size, :], t4)) + bias[3]
w4 = (1.0 / sqrt(sum(t4 ** 2))) * exp(-sum(t4 ** 2) / (2 * problem_size * xichma[3] ** 2))
# 5. Rotated Griewank’s Function F7’
t5 = solution - shift_data[4]
g5 = lamda[4] * f7_griewank__(dot(matrix[4*problem_size:, :], t5)) + bias[4]
w5 = (1.0 / sqrt(sum(t5 ** 2))) * exp(-sum(t5 ** 2) / (2 * problem_size * xichma[4] ** 2))
sw = sum([w1, w2, w3, w4, w5])
result = (w1 * g1 + w2 * g2 + w3 * g3 + w4 * g4 + w5 * g5) / sw
return result + f_bias
def F27(solution=None, name="Composition Function 5", f_shift_file="shift_data_27.txt", f_matrix="M_27_D", f_bias=2700):
problem_size = len(solution)
xichma = array([10, 10, 10, 20, 20])
lamda = array([10, 10, 2.5, 25, 1e-6])
bias = array([0, 100, 200, 300, 400])
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = f_matrix + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_matrix_data__(f_shift_file)[:problem_size]
shift_data = shift_data[:, :problem_size]
matrix = load_matrix_data__(f_matrix)
# 1. Rotated HGBat Function F14'
t1 = solution - shift_data[0]
g1 = lamda[0] * f12_hgbat__(dot(matrix[:problem_size, :], t1)) + bias[0]
w1 = (1.0 / sqrt(sum(t1 ** 2))) * exp(-sum(t1 ** 2) / (2 * problem_size * xichma[0] ** 2))
# 2. Rotated Rastrigin’s Function F9’
t2 = solution - shift_data[1]
g2 = lamda[1] * f8_rastrigin__(dot(matrix[problem_size:2 * problem_size, :], t2)) + bias[1]
w2 = (1.0 / sqrt(sum(t2 ** 2))) * exp(-sum(t2 ** 2) / (2 * problem_size * xichma[1] ** 2))
# 3. Rotated Schwefel's Function F11’
t3 = solution - shift_data[2]
g3 = lamda[2] * f9_modified_schwefel__(dot(matrix[2 * problem_size: 3 * problem_size, :], t3)) + bias[2]
w3 = (1.0 / sqrt(sum(t3 ** 2))) * exp(-sum(t3 ** 2) / (2 * problem_size * xichma[2] ** 2))
# 4. Rotated Weierstrass Function F6’
t4 = solution - shift_data[3]
g4 = lamda[3] * f6_weierstrass__(dot(matrix[3 * problem_size: 4 * problem_size, :], t4)) + bias[3]
w4 = (1.0 / sqrt(sum(t4 ** 2))) * exp(-sum(t4 ** 2) / (2 * problem_size * xichma[3] ** 2))
# 5. Rotated High Conditioned Elliptic Function F1’
t5 = solution - shift_data[4]
g5 = lamda[4] * f1_elliptic__(dot(matrix[4 * problem_size:, :], t5)) + bias[4]
w5 = (1.0 / sqrt(sum(t5 ** 2))) * exp(-sum(t5 ** 2) / (2 * problem_size * xichma[4] ** 2))
sw = sum([w1, w2, w3, w4, w5])
result = (w1 * g1 + w2 * g2 + w3 * g3 + w4 * g4 + w5 * g5) / sw
return result + f_bias
def F28(solution=None, name="Composition Function 6", f_shift_file="shift_data_28.txt", f_matrix="M_28_D", f_bias=2800):
problem_size = len(solution)
xichma = array([10, 20, 30, 40, 50])
lamda = array([2.5, 10, 2.5, 5e-4, 1e-6])
bias = array([0, 100, 200, 300, 400])
if problem_size in SUPPORT_DIMENSION_2:
f_matrix = f_matrix + str(problem_size) + ".txt"
else:
print("CEC 2014 function only support problem size 10, 20, 30, 50, 100")
return 1
shift_data = load_matrix_data__(f_shift_file)[:problem_size]
shift_data = shift_data[:, :problem_size]
matrix = load_matrix_data__(f_matrix)
# 1. Rotated Expanded Griewank’s plus Rosenbrock’s Function F15’
t1 = solution - shift_data[0]
g1 = lamda[0] * F15(solution, bias=0) + bias[0]
w1 = (1.0 / sqrt(sum(t1 ** 2))) * exp(-sum(t1 ** 2) / (2 * problem_size * xichma[0] ** 2))
# 2. Rotated HappyCat Function F13’
t2 = solution - shift_data[1]
g2 = lamda[1] * f11_happy_cat__(dot(matrix[problem_size:2 * problem_size, :], t2)) + bias[1]
w2 = (1.0 / sqrt(sum(t2 ** 2))) * exp(-sum(t2 ** 2) / (2 * problem_size * xichma[1] ** 2))
# 3. Rotated Schwefel's Function F11’
t3 = solution - shift_data[2]
g3 = lamda[2] * f9_modified_schwefel__(dot(matrix[2 * problem_size: 3 * problem_size, :], t3)) + bias[2]
w3 = (1.0 / sqrt(sum(t3 ** 2))) * exp(-sum(t3 ** 2) / (2 * problem_size * xichma[2] ** 2))
# 4. Rotated Expanded Scaffer’s F6 Function F16’
t4 = solution - shift_data[3]
g4 = lamda[3] * f14_expanded_scaffer__(dot(matrix[3 * problem_size: 4 * problem_size, :], t4)) + bias[3]
w4 = (1.0 / sqrt(sum(t4 ** 2))) * exp(-sum(t4 ** 2) / (2 * problem_size * xichma[3] ** 2))
# 5. Rotated High Conditioned Elliptic Function F1’
t5 = solution - shift_data[4]
g5 = lamda[4] * f1_elliptic__(dot(matrix[4 * problem_size:, :], t5)) + bias[4]
w5 = (1.0 / sqrt(sum(t5 ** 2))) * exp(-sum(t5 ** 2) / (2 * problem_size * xichma[4] ** 2))
sw = sum([w1, w2, w3, w4, w5])
result = (w1 * g1 + w2 * g2 + w3 * g3 + w4 * g4 + w5 * g5) / sw
return result + f_bias
def F29(solution=None, name="Composition Function 7", shift_data_file="shift_data_29.txt", f_bias=2900):
num_funcs = 3
problem_size = len(solution)
xichma = array([10, 30, 50])
lamda = array([1, 1, 1])
bias = array([0, 100, 200])
if problem_size > 100:
print("CEC 2014 not support for problem size > 100")
return 1
shift_data = load_matrix_data__(shift_data_file)[:problem_size]
shift_data = shift_data[:, :problem_size]
def __fi__(solution=None, idx=None):
if idx == 0:
return F17(solution, bias=0, shuffle=29)
elif idx == 1:
return F18(solution, bias=0, shuffle=29)
else:
return F19(solution, bias=0, shuffle=29)
weights = ones(num_funcs)
fits = ones(num_funcs)
for i in range(0, num_funcs):
t1 = lamda[i] * __fi__(solution, i) + bias[i]
t2 = 1.0 / sqrt(sum((solution - shift_data[i]) ** 2))
w_i = t2 * exp(-sum((solution - shift_data[i]) ** 2) / (2 * problem_size * xichma[i] ** 2))
weights[i] = w_i
fits[i] = t1
sw = sum(weights)
result = 0.0
for i in range(0, num_funcs):
result += (weights[i] / sw) * fits[i]
return result + f_bias
def F30(solution=None, name="Composition Function 8", shift_data_file="shift_data_30.txt", f_bias=3000):
num_funcs = 3
problem_size = len(solution)
xichma = array([10, 30, 50])
import copy
import functools
import gc
from hfutils.constants import TASK_TO_LABELS
from seaborn.distributions import histplot
import torch
import logging
import numpy as np
from transformers.data.data_collator import (
DataCollatorForSeq2Seq,
default_data_collator,
)
from transformers.utils.dummy_pt_objects import T5ForConditionalGeneration
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
from scipy.optimize import minimize
import os
import sys
from torch.nn.modules.activation import Threshold
from datasets import Dataset, concatenate_datasets
from datasets import load_dataset, load_metric
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel, T5ForConditionalGeneration
from torch.utils.data import (
DataLoader,
RandomSampler,
SequentialSampler,
TensorDataset,
dataloader,
)
from sklearn.model_selection import train_test_split
from hfutils.logger import Logger
from hfutils.arg_parser import HfArguments
from hfutils.loader import ModelLoader, DatasetLoader
from hfutils.temperature_scaling import ModelWithTemperature
from hfutils.monte_carlo import monte_carlo_bounds
from hfutils.calibration import temperature_scale, temperature_scaling
args = HfArguments()
task_name = args.data_args.task_name
tokenizer, _ = ModelLoader(args).load(load_model=False)
pos_token = tokenizer("false").input_ids[0]
neg_token = tokenizer("true").input_ids[0]
label_tokens = [
tokenizer(label, max_length=2).input_ids[0]
for label in TASK_TO_LABELS[task_name]
if label is not None
]
print(label_tokens)
# exit()
# print(pos_token, neg_token)
# print(tokenizer(list(TASK_TO_LABELS[task_name])).input_ids)
# exit()
home_dir = os.path.expanduser("~")
# home_dir = "/mnt/raid1nvme1"
# base_dir = "/mnt/yavin/checkpoints"
base_dir = os.path.join(home_dir, os.path.join("model-finetune", "outputs", "google"))
model_keys = [
"S",
"M",
"L",
"XL",
]
device_map = [
"cuda:0",
"cuda:1",
"cuda:2",
"cuda:3",
]
energy_discount_factor = [
1 / 40,
3 / 40,
10 / 40,
40 / 40,
]
model_paths = [
f"{base_dir}/t5-small-lm-adapt/all/checkpoint-4500",
f"{base_dir}/t5-base-lm-adapt/all/checkpoint-2000",
f"{base_dir}/t5-large-lm-adapt/all/checkpoint-1500",
f"{base_dir}/t5-xl-lm-adapt/all/checkpoint-1500",
]
# model_paths = [
# f"{base_dir}/t5-small-lm-adapt/{task_name}/checkpoint-2420",
# # f"{base_dir}/google/t5-small-lm-adapt/qqp",
# f"{base_dir}/t5-base-lm-adapt/{task_name}/checkpoint-820",
# f"{base_dir}/t5-large-lm-adapt/{task_name}/checkpoint-240",
# # f"{base_dir}/t5-xl-lm-adapt/{task_name}/checkpoint-260",
# ]
# model_paths = [
# f"{base_dir}/t5-small-lm-adapt/{task_name}/checkpoint-5540",
# # f"{base_dir}/google/t5-small-lm-adapt/qqp",
# # f"{base_dir}/t5-base-lm-adapt/{task_name}/checkpoint-1860",
# # f"{base_dir}/t5-large-lm-adapt/{task_name}/checkpoint-1780",
# # f"{base_dir}/t5-xl-lm-adapt/{task_name}/checkpoint-1380",
# ]
model_energy = dict(zip(model_keys, energy_discount_factor))
model_paths = dict(zip(model_keys, model_paths))
model_device = dict(zip(model_keys, device_map))
logger = Logger(__file__, "info", 5000000, 5)
models = dict()
for key in model_paths:
logger.debug("key %s, path %s, device %s", key, model_paths[key], model_device[key])
models[key] = T5ForConditionalGeneration.from_pretrained(
model_paths[key]
) # if key != "S" else DistilBertForSequenceClassification.from_pretrained(model_paths[key])
models[key] = models[key].to(model_device[key])
models[key].eval()
torch.cuda.empty_cache()
gc.collect()
logger.info("model loaded")
# ------------- Dataset Prepare --------------
from hfutils.loader import t5_preprocess_function, load_glue_val
from functools import partial
preprocess_function = partial(
t5_preprocess_function,
tokenizer=tokenizer,
padding="max_length",
max_length=128,
)
# dataset_loader = DatasetLoader(args)
# # train_dataset = dataset_loader.load(tokenizer, partition="validation", create_dataloader=False)
# eval_dataset = dataset_loader.load(
# tokenizer, partition="validation", create_dataloader=False
# )
# logger.debug("eval_dataset %s", eval_dataset)
eval_dataset = load_glue_val(preprocess_function).shuffle()
data_args = args.data_args
# if data_args.pad_to_max_length:
# data_collator = default_data_collator
# else:
# data_collator = DataCollatorForSeq2Seq(tokenizer)
data_collator = DataCollatorForSeq2Seq(tokenizer)
train_len = int(len(eval_dataset) * 0.4)
train = Dataset.from_dict(eval_dataset[:train_len])
test = eval_dataset
# test = Dataset.from_dict(eval_dataset[train_len:])
print(train, test)
train_dataloader = DataLoader(
train,
shuffle=True,
collate_fn=data_collator,
batch_size=data_args.train_bsz,
# drop_last=True,
)
test_dataloader = DataLoader(
test,
shuffle=True,
collate_fn=data_collator,
batch_size=data_args.eval_bsz,
# drop_last=True,
)
m = torch.nn.Softmax(dim=1)
logger.info("data loaded")
# ------------- Train Temperature --------------
# for key in model_keys:
# models[key] = ModelWithTemperature(models[key], tokenizer, model_device[key])
# models[key].set_logger(logger)
# models[key].set_temperature(train_dataloader)
print("temperature loaded")
n_models = len(model_keys)
num_labels = 0
def model_inference(model, batch, temperature=None, device="cuda:0"):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
input_ids = input_ids.to(device)
attention_mask = attention_mask.to(device)
outputs = model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
do_sample=False, # disable sampling to test if batching affects output
return_dict_in_generate=True,
output_scores=True,
)
logits = outputs.scores[0][:, label_tokens]
if temperature is not None:
logits = temperature_scale(logits, temperature)
return logits
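# --- Added note (illustrative) -------------------------------------------------
# model_inference runs a single greedy generation step (do_sample=False) and
# keeps only the scores of the verbalized label tokens, so each seq2seq model
# is reduced to a small classification head over the task's label vocabulary.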
long_dataset = concatenate_datasets([eval_dataset] * 10)
# for batch_size in [1, 2, 4, 8, 16, 32, 64, 128, 256]:
# # for batch_size in [32, 64, 128, 256, 512]:
# metric = load_metric("accuracy")
# key = model_keys[0]
# eval_dataloader = DataLoader(
# eval_dataset,
# shuffle=True,
# collate_fn=data_collator,
# batch_size=batch_size,
# )
# for batch in tqdm(train_dataloader, desc="Test Acc"):
# logits = model_inference(models[key], batch, device=model_device[key])
# prediction = np.argmax(logits.detach().cpu().numpy(), axis=-1)
# label = batch["labels"][:, 0] == pos_token
# metric.add_batch(
# predictions=prediction,
# references=label
# )
# print(batch_size, metric.compute())
model_probs = dict(zip(model_keys, [list() for _ in range(n_models)]))
model_outputs = dict(zip(model_keys, [list() for _ in range(n_models)]))
labels_list = []
# def agg_logits(hist, curr, pos, device):
# if hist is not None:
# hist = hist.to(device)
# curr_prob, _ = torch.max(torch.float_power(m(curr), 2), dim=-1)
# hist_prob, _ = torch.max(torch.float_power(m(hist), 2), dim=-1)
# diff = torch.abs(hist_prob-curr_prob)
# # print(diff)
# for i in range(len(diff)):
# if diff[i] > 0.2:
# if curr_prob[i] < hist_prob[i]:
# curr[i] = hist[i]
# else:
# curr[i] = (hist[i] * pos + curr[i]) / (pos+1)
# return curr
def agg_logits(hist, curr, pos, device):
# return curr
alpha = 0.6
if hist is not None:
hist = hist.to(device)
# return (hist * pos + curr) / (pos + 1)
return hist * (1 - alpha) + curr * alpha
return curr
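# --- Added worked example (illustrative) ---------------------------------------
# With alpha = 0.6 the aggregation above is an exponential moving average over
# the model cascade: combined = 0.4 * hist + 0.6 * curr, so later (larger)
# models dominate while earlier predictions still contribute.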
with torch.no_grad():
key = model_keys[-1]
for batch in tqdm(train_dataloader, desc="Collect Train Data"):
logits = model_inference(models[key], batch, device=model_device[key])
with torch.no_grad():
for batch in tqdm(train_dataloader, desc="Collect Train Data"):
# input_ids=batch['input_ids']
# attention_mask=batch['attention_mask']
label = batch["labels"][:, 0] == pos_token
label = label.to(torch.int64)
# label = label.to(model_device[key])
num_labels += len(label)
label = label.cpu().detach().flatten()
labels_list.append(label)
hist_logits = None
for i, key in enumerate(model_keys):
logits = model_inference(models[key], batch, device=model_device[key])
# hist_logits = agg_logits(hist_logits, logits, i, model_device[key])
model_outputs[key].append(logits)
# probabilities = torch.float_power(m(hist_logits).cpu().detach(), 2)
# model_ans = torch.argmax(probabilities, dim=-1).flatten()
# model_probs[key] += [[p[model_ans[i]], int(model_ans[i] == label[i])] for i, p in enumerate(probabilities)]
labels = torch.cat(labels_list)
model_temperature = {}
for key in model_keys:
# model_probs[key] = np.array(model_probs[key])
model_outputs[key] = torch.cat(model_outputs[key]).to(model_device[key])
labels = labels.to(model_device[key])
temperature = (
temperature_scaling(model_outputs[key], labels)
.detach()
.cpu()
.numpy()
.tolist()[0]
)
# bar = 1.5
# temperature = bar + (temperature - bar) / 2 if temperature > bar else temperature
model_temperature[key] = torch.nn.Parameter(
torch.ones(1, device=model_device[key]) * temperature
)
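# --- Added note (illustrative) -------------------------------------------------
# temperature_scaling fits one scalar temperature T per model on the collected
# logits and labels; temperature_scale then presumably rescales the logits by
# that T before the softmax, calibrating confidence without changing the argmax.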
hist_logits = None
for i, key in enumerate(model_keys):
model_outputs[key] = temperature_scale(model_outputs[key], model_temperature[key])
hist_logits = agg_logits(
hist_logits if key != model_keys[-1] else None,
model_outputs[key],
i,
model_device[key],
)
# hist_logits = agg_logits(None, model_outputs[key], i, model_device[key])
probabilities = torch.float_power(m(hist_logits).to(model_device[key]), 2)
model_ans = torch.argmax(probabilities, dim=-1).flatten()
model_ans = model_ans.detach().cpu().numpy()
probabilities = probabilities.detach().cpu().numpy()
temp_labels = labels.detach().cpu().numpy()
model_probs[key] = np.array(
[
[p[model_ans[i]], int(model_ans[i] == temp_labels[i])]
for i, p in enumerate(probabilities)
]
)
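# --- Added note (illustrative) -------------------------------------------------
# model_probs[key] now holds one [confidence, was_correct] pair per example:
# the (squared-softmax) probability of the predicted label and whether that
# prediction matched the ground truth. These pairs are presumably what the
# reward/threshold search below (total_reward) consumes.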
logger.info("model_temperature %s", model_temperature)
# mc_threshold = []
# for key in model_keys[:-1]:
# gm = GaussianMixture(n_components=2).fit(model_probs[key][:, 0].reshape((-1, 1)))
# idx = np.argsort(gm.means_.flatten())[-1]
# mean = gm.means_.flatten()[idx]
# var = gm.covariances_.flatten()[idx]
# mc_threshold.append(
# mean
# )
# logger.info("%s means_ %s covariances_ %s mean %s var %s", key, gm.means_, gm.covariances_, mean, var)
# # mc_threshold.append(
# # np.mean(model_probs[key][:, 0]) # - np.std(model_probs[key][:, 0])
# # )
# logger.info("Threshold %s", mc_threshold)
def total_reward(threshold, model_keys):
reward = 0
energy = 0
mask = np.array([False] * num_labels)
from __future__ import absolute_import, division, print_function
from joblib import delayed, Parallel
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import multiprocessing
import threading
import warnings
warnings.simplefilter('ignore')
# Package imports
from externals.six.moves import range
from feature_selectors import (permutation_test_mc, permutation_test_mi,
permutation_test_dcor, permutation_test_pcor,
permutation_test_rdc)
from feature_selectors import mc_fast, mi, pcor, py_dcor
from scorers import gini_index, mse
from utils import bayes_boot_probs, logger
###################
"""SINGLE MODELS"""
###################
class Node(object):
"""Decision node in tree
Parameters
----------
col : int
Integer indexing the location of feature or column
col_pval : float
Probability value from permutation test for feature selection
threshold : float
Best split found in feature
impurity : float
Impurity measuring quality of split
value : 1d array-like or float
For classification trees, estimate of each class probability
For regression trees, central tendency estimate
left_child : tuple
For left child node, two element tuple with first element a 2d array of
features and second element a 1d array of labels
right_child : tuple
For right child node, two element tuple with first element a 2d array of
features and second element a 1d array of labels
"""
def __init__(self, col=None, col_pval=None, threshold=None, impurity=None,
value=None, left_child=None, right_child=None):
self.col = col
self.col_pval = col_pval
self.threshold = threshold
self.impurity = impurity
self.value = value
self.left_child = left_child
self.right_child = right_child
class CITreeBase(object):
"""Base class for conditional inference tree
Parameters
----------
min_samples_split : int
Minimum samples required for a split
alpha : float
Threshold value for selecting feature with permutation tests. Smaller
values correspond to shallower trees
max_depth : int
Maximum depth to grow tree
max_feats : str or int
Maximum feats to select at each split. String arguments include 'sqrt',
'log', and 'all'
n_permutations : int
Number of permutations during feature selection
early_stopping : bool
Whether to implement early stopping during feature selection. If True,
then as soon as the first permutation test returns a p-value less than
alpha, this feature will be chosen as the splitting variable
muting : bool
Whether to perform variable muting
verbose : bool or int
Controls verbosity of training and testing
n_jobs : int
Number of jobs for permutation testing
random_state : int
Sets seed for random number generator
"""
def __init__(self, min_samples_split=2, alpha=.05, max_depth=-1,
max_feats=-1, n_permutations=100, early_stopping=False,
muting=True, verbose=0, n_jobs=-1, random_state=None):
# Error checking
if alpha <= 0 or alpha > 1:
raise ValueError("Alpha (%.2f) should be in (0, 1]" % alpha)
if n_permutations < 0:
raise ValueError("n_permutations (%d) should be > 0" % \
n_permutations)
if not isinstance(max_feats, int) and max_feats not in ['sqrt', 'log', 'all', -1]:
raise ValueError("%s not a valid argument for max_feats" % \
str(max_feats))
# Define attributes
self.alpha = float(alpha)
self.min_samples_split = max(1, int(min_samples_split))
self.n_permutations = int(n_permutations)
self.max_feats = max_feats
self.early_stopping = early_stopping
self.muting = muting
self.verbose = verbose
self.n_jobs = n_jobs
self.root = None
self.splitter_counter_ = 0
if max_depth == -1:
self.max_depth = np.inf
else:
self.max_depth = int(max(1, max_depth))
if random_state is None:
self.random_state = np.random.randint(1, 9999)
else:
# TODO: ADD CHECK FOR CRAZY LARGE INTEGER?
self.random_state = int(random_state)
def _mute_feature(self, col_to_mute):
"""Removes variable from being selected
Parameters
----------
col_to_mute : int
Integer index of column to remove
"""
# Locate feature in the available features array
idx = np.where(self.available_features_ == col_to_mute)[0]
# Mute feature only if it is not in the protected set
if col_to_mute in self.protected_features_:
return
else:
self.available_features_ = np.delete(self.available_features_, idx)
# Recalculate actual number for max_feats before fitting
p = self.available_features_.shape[0]
if self.max_feats == 'sqrt':
self.max_feats = int(np.sqrt(p))
elif self.max_feats == 'log':
self.max_feats = int(np.log(p+1))
elif self.max_feats in ['all', -1]:
self.max_feats = p
else:
self.max_feats = int(self.max_feats)
# Check to make sure max_feats is not larger than the number of remaining
# features
if self.max_feats > len(self.available_features_):
self.max_feats = len(self.available_features_)
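# --- Added worked example (illustrative) -----------------------------------
# With p = 10 features and max_feats='sqrt', fit() converts max_feats to
# int(sqrt(10)) = 3 candidate columns per split; once max_feats is already an
# integer, the main effect of the recalculation above is the final cap that
# keeps it no larger than the number of features still available.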
def _selector(self, X, y, col_idx):
"""Find feature most correlated with label"""
raise NotImplementedError("_splitter method not callable from base class")
def _splitter(self, *args, **kwargs):
"""Finds best split for feature"""
raise NotImplementedError("_splitter method not callable from base class")
def _build_tree(self, X, y, depth=0):
"""Recursively builds tree
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
depth : int
Depth of current recursive call
Returns
-------
Node : object
Child node or terminal node in recursive splitting
"""
n, p = X.shape
# Check for stopping criteria
if n > self.min_samples_split and \
depth < self.max_depth and \
not np.all(y == y[0]):
# Controls randomness of column sampling
self.splitter_counter_ += 1
np.random.seed(self.random_state*self.splitter_counter_)
# Find column with strongest association with outcome
try:
col_idx = np.random.choice(self.available_features_,
size=self.max_feats, replace=False)
except:
col_idx = np.random.choice(self.available_features_,
size=len(self.available_features_),
replace=False)
col, col_pval = self._selector(X, y, col_idx)
# Add selected feature to protected features
if col not in self.protected_features_:
self.protected_features_.append(col)
if self.verbose > 1:
logger("tree", "Added feature %d to protected set, size "
"= %d" % (col, len(self.protected_features_)))
if col_pval <= self.alpha:
# Find best split among selected variable
impurity, threshold, left, right = self._splitter(X, y, n, col)
if left and right and len(left[0]) > 0 and len(right[0]) > 0:
# Build subtrees for the right and left branches
if self.verbose:
logger("tree", "Building left subtree with "
"%d samples at depth %d" % \
(len(left[0]), depth+1))
left_child = self._build_tree(*left, depth=depth+1)
if self.verbose:
logger("tree", "Building right subtree with "
"%d samples at depth %d" % \
(len(right[0]), depth+1))
right_child = self._build_tree(*right, depth=depth+1)
# Return all arguments to constructor except value
return Node(col=col, col_pval=col_pval, threshold=threshold,
left_child=left_child, right_child=right_child,
impurity=impurity)
# Calculate terminal node value
if self.verbose: logger("tree", "Root node reached at depth %d" % depth)
value = self.node_estimate(y)
# Terminal node, no other values to pass to constructor
return Node(value=value)
def fit(self, X, y=None):
"""Trains model
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
Returns
-------
self : CITreeBase
Instance of CITreeBase class
"""
if self.verbose:
logger("tree", "Building root node with %d samples" % X.shape[0])
# Calculate actual number for max_feats before fitting
p = X.shape[1]
if self.max_feats == 'sqrt':
self.max_feats = int(np.sqrt(p))
elif self.max_feats == 'log':
self.max_feats = int(np.log(p+1))
elif self.max_feats in ['all', -1]:
self.max_feats = p
else:
self.max_feats = int(self.max_feats)
# Begin recursive build
self.protected_features_ = []
self.available_features_ = np.arange(p, dtype=int)
self.feature_importances_ = np.zeros(p)
self.root = self._build_tree(X, y)
sum_fi = np.sum(self.feature_importances_)
if sum_fi > 0: self.feature_importances_ /= sum_fi
return self
def predict_label(self, X, tree=None):
"""Predicts label
Parameters
----------
X : 2d array-like
Array of features for single sample
tree : CITreeBase
Trained tree
Returns
-------
label : int or float
Predicted label
"""
# If we have a value => return value as the prediction
if tree is None: tree = self.root
if tree.value is not None: return tree.value
# Determine if we will follow left or right branch
feature_value = X[tree.col]
branch = tree.left_child if feature_value <= tree.threshold \
else tree.right_child
# Test subtree
return self.predict_label(X, branch)
def predict(self, *args, **kwargs):
"""Predicts labels on test data"""
raise NotImplementedError("predict method not callable from base class")
def print_tree(self, tree=None, indent=" ", child=None):
"""Prints tree structure
Parameters
----------
tree : CITreeBase
Trained tree model
indent : str
Indent spacing
child : Node
Left or right child node
"""
# If we're at leaf => print the label
if not tree: tree = self.root
if tree.value is not None: print("label:", tree.value)
# Go deeper down the tree
else:
# Print splitting rule
print("X[:,%s] %s %s " % (tree.col,
'<=' if child in [None, 'left'] else '>',
tree.threshold))
# Print the left child
print("%sL: " % (indent), end="")
self.print_tree(tree.left_child, indent + indent, 'left')
# Print the right
print("%sR: " % (indent), end="")
self.print_tree(tree.right_child, indent + indent, 'right')
class CITreeClassifier(CITreeBase, BaseEstimator, ClassifierMixin):
"""Conditional inference tree classifier
Parameters
----------
selector : str
Variable selector for finding strongest association between a feature
and the label
Derived from CITreeBase class; see constructor for parameter definitions
"""
def __init__(self,
min_samples_split=2,
alpha=.05,
selector='mc',
max_depth=-1,
max_feats=-1,
n_permutations=100,
early_stopping=False,
muting=True,
verbose=0,
n_jobs=-1,
random_state=None):
# Define node estimate
self.node_estimate = self._estimate_proba
# Define selector
if selector not in ['mc', 'mi', 'hybrid']:
raise ValueError("%s not a valid selector, valid selectors are " \
"mc, mi, and hybrid")
self.selector = selector
if self.selector != 'hybrid':
# Wrapper correlation selector
self._selector = self._cor_selector
# Permutation test based on correlation measure
if self.selector == 'mc':
self._perm_test = permutation_test_mc
else:
self._perm_test = permutation_test_mi
else:
self._perm_test = None
self._selector = self._hybrid_selector
super(CITreeClassifier, self).__init__(
min_samples_split=min_samples_split,
alpha=alpha,
max_depth=max_depth,
max_feats=max_feats,
n_permutations=n_permutations,
early_stopping=early_stopping,
muting=muting,
verbose=verbose,
n_jobs=n_jobs,
random_state=random_state)
def _hybrid_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a hybrid of multiple correlation and mutual information measures
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
if mc_fast(X[:, col], y, self.n_classes_) >= mi(X[:, col], y):
pval = permutation_test_mc(x=X[:, col],
y=y,
n_classes=self.n_classes_,
B=self.n_permutations,
random_state=self.random_state)
else:
pval = permutation_test_mi(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _splitter(self, X, y, n, col):
"""Splits data set into two child nodes based on optimized weighted
gini index
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
col : list
Column of X to search for best split
Returns
-------
best_impurity : float
Gini index associated with best split
best_threshold : float
X value associated with splitting of data set into two child nodes
left : tuple
Left child node data consisting of two elements: (features, labels)
right : tuple
Right child node data consisting of two elements: (features, labels)
"""
if self.verbose > 1:
logger("splitter", "Testing splits on feature %d" % col)
# Initialize variables for splitting
impurity, threshold = 0.0, None
left, right = None, None
# Call sklearn's optimized implementation of decision tree classifiers
# to make split using Gini index
base = DecisionTreeClassifier(
max_depth=1, min_samples_split=self.min_samples_split
).fit(X[:, col].reshape(-1, 1), y).tree_
# Make split based on best threshold
threshold = base.threshold[0]
idx = np.where(X[:, col] <= threshold, 1, 0)
X_left, y_left = X[idx==1], y[idx==1]
X_right, y_right = X[idx==0], y[idx==0]
n_left, n_right = X_left.shape[0], X_right.shape[0]
# Skip small splits
if n_left < self.min_samples_split or n_right < self.min_samples_split:
return impurity, threshold, left, right
# Calculate parent and weighted children impurities
if len(base.impurity) == 3:
node_impurity = base.impurity[0]
left_impurity = base.impurity[1]*(n_left/float(n))
right_impurity = base.impurity[2]*(n_right/float(n))
else:
node_impurity = gini_index(y, self.labels_)
left_impurity = gini_index(y_left, self.labels_)*(n_left/float(n))
right_impurity = gini_index(y_right, self.labels_)*(n_right/float(n))
# Define groups and calculate impurity decrease
left, right = (X_left, y_left), (X_right, y_right)
impurity = node_impurity - (left_impurity + right_impurity)
# Update feature importance (mean decrease impurity)
self.feature_importances_[col] += impurity
return impurity, threshold, left, right
def _cor_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a correlation measure
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
# Mute feature and continue since constant
if np.all(X[:, col] == X[0, col]) and len(self.available_features_) > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "Constant values, muting feature %d" \
% col)
continue
pval = self._perm_test(x=X[:, col],
y=y,
n_classes=self.n_classes_,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _estimate_proba(self, y):
"""Estimates class distribution in node
Parameters
----------
y : 1d array-like
Array of labels
Returns
-------
class_probs : 1d array-like
Array of class probabilities
"""
return np.array([np.mean(y == label) for label in self.labels_])
def fit(self, X, y, labels=None):
"""Trains conditional inference tree classifier
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
labels : 1d array-like
Array of unique class labels
Returns
-------
self : CITreeClassifier
Instance of CITreeClassifier class
"""
self.labels_ = labels if labels is not None else np.unique(y)
self.n_classes_ = len(self.labels_)
super(CITreeClassifier, self).fit(X, y)
return self
def predict_proba(self, X):
"""Predicts class probabilities for feature vectors X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
class_probs : 2d array-like
Array of predicted class probabilities
"""
if self.verbose:
logger("test", "Predicting labels for %d samples" % X.shape[0])
return np.array([self.predict_label(sample) for sample in X])
def predict(self, X):
"""Predicts class labels for feature vectors X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
y : 1d array-like
Array of predicted classes
"""
y_proba = self.predict_proba(X)
return np.argmax(y_proba, axis=1)
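# --- Hypothetical usage sketch (not part of the original library) -----------
# A minimal example of fitting CITreeClassifier on synthetic data. The data and
# hyperparameter values are illustrative assumptions; the keyword arguments are
# the ones packaged for CITreeClassifier by CIForestClassifier below
# (alpha, n_permutations, random_state, ...).
def _demo_citree_classifier():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 5))
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)   # labels driven by two features
    clf = CITreeClassifier(alpha=0.05, n_permutations=50, random_state=0)
    clf.fit(X, y)
    print("training accuracy:", np.mean(clf.predict(X) == y))
    print("class probabilities, first sample:", clf.predict_proba(X[:1]))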
class CITreeRegressor(CITreeBase, BaseEstimator, RegressorMixin):
"""Conditional inference tree regressor
Parameters
----------
selector : str
Variable selector for finding strongest association between a feature
and the label
Derived from CITreeBase class; see constructor for rest of parameter definitions
"""
def __init__(self,
min_samples_split=2,
alpha=.05,
selector='pearson',
max_depth=-1,
max_feats=-1,
n_permutations=100,
early_stopping=False,
muting=True,
verbose=0,
n_jobs=-1,
random_state=None):
# Define node estimate
self.node_estimate = self._estimate_mean
# Define selector
if selector not in ['pearson', 'distance', 'rdc', 'hybrid']:
raise ValueError("%s not a valid selector, valid selectors are " \
"pearson, distance, rdc, and hybrid")
self.selector = selector
if self.selector != 'hybrid':
# Wrapper correlation selector
self._selector = self._cor_selector
# Permutation test based on correlation measure
if self.selector == 'pearson':
self._perm_test = permutation_test_pcor
elif self.selector == 'distance':
self._perm_test = permutation_test_dcor
else:
self._perm_test = permutation_test_rdc
else:
self._perm_test = None
self._selector = self._hybrid_selector
super(CITreeRegressor, self).__init__(
min_samples_split=min_samples_split,
alpha=alpha,
max_depth=max_depth,
max_feats=max_feats,
n_permutations=n_permutations,
early_stopping=early_stopping,
muting=muting,
verbose=verbose,
n_jobs=n_jobs,
random_state=random_state)
def _hybrid_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a hybrid of pearson and distance correlation measures
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
if abs(pcor(X[:, col], y)) >= abs(py_dcor(X[:, col], y)):
pval = permutation_test_pcor(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
else:
pval = permutation_test_dcor(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _cor_selector(self, X, y, col_idx):
"""Selects feature most correlated with y using permutation tests with
a correlation measure
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
col_idx : list
Columns of X to examine for feature selection
Returns
-------
best_col : int
Best column from feature selection. Note, if early_stopping is
enabled then this may not be the absolute best column
best_pval : float
Probability value from permutation test
"""
# Select random column from start and update
best_col, best_pval = np.random.choice(col_idx), np.inf
# Iterate over columns
for col in col_idx:
# Mute feature and continue since constant
if np.all(X[:, col] == X[0, col]) and len(self.available_features_) > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "Constant values, muting feature %d" \
% col)
continue
pval = self._perm_test(x=X[:, col],
y=y,
B=self.n_permutations,
random_state=self.random_state)
# If variable muting
if self.muting and \
pval == 1.0 and \
self.available_features_.shape[0] > 1:
self._mute_feature(col)
if self.verbose: logger("tree", "ASL = 1.0, muting feature %d" % col)
if pval < best_pval:
best_col, best_pval = col, pval
# If early stopping
if self.early_stopping and best_pval < self.alpha:
if self.verbose: logger("tree", "Early stopping")
return best_col, best_pval
return best_col, best_pval
def _splitter(self, X, y, n, col):
"""Splits data set into two child nodes based on optimized weighted
mean squared error
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
col : list
Column of X to search for best split
Returns
-------
best_impurity : float
Mean squared error associated with best split
best_threshold : float
X value associated with splitting of data set into two child nodes
left : tuple
Left child node data consisting of two elements: (features, labels)
right : tuple
            Right child node data consisting of two elements: (features, labels)
"""
if self.verbose > 1:
logger("splitter", "Testing splits on feature %d" % col)
# Initialize variables for splitting
impurity, threshold = 0.0, None
left, right = None, None
# Call sklearn's optimized implementation of decision tree regressors
# to make split using mean squared error
base = DecisionTreeRegressor(
max_depth=1, min_samples_split=self.min_samples_split
).fit(X[:, col].reshape(-1, 1), y).tree_
# Make split based on best threshold
threshold = base.threshold[0]
idx = np.where(X[:, col] <= threshold, 1, 0)
X_left, y_left = X[idx==1], y[idx==1]
X_right, y_right = X[idx==0], y[idx==0]
n_left, n_right = X_left.shape[0], X_right.shape[0]
# Skip small splits
if n_left < self.min_samples_split or n_right < self.min_samples_split:
return impurity, threshold, left, right
# Calculate parent and weighted children impurities
if len(base.impurity) == 3:
node_impurity = base.impurity[0]
left_impurity = base.impurity[1]*(n_left/float(n))
right_impurity = base.impurity[2]*(n_right/float(n))
else:
node_impurity = mse(y)
left_impurity = mse(y_left)*(n_left/float(n))
right_impurity = mse(y_right)*(n_right/float(n))
# Define groups and calculate impurity decrease
left, right = (X_left, y_left), (X_right, y_right)
impurity = node_impurity - (left_impurity + right_impurity)
# Update feature importance (mean decrease impurity)
self.feature_importances_[col] += impurity
return impurity, threshold, left, right
def _estimate_mean(self, y):
"""Estimates mean in node
Parameters
----------
y : 1d array-like
Array of labels
Returns
-------
mu : float
Node mean estimate
"""
return np.mean(y)
def fit(self, X, y):
"""Trains conditional inference tree regressor
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
Returns
-------
self : CITreeRegressor
Instance of CITreeRegressor class
"""
super(CITreeRegressor, self).fit(X, y)
return self
def predict(self, X):
"""Predicts labels for feature vectors in X
Parameters
----------
X : 2d array-like
Array of features
Returns
-------
y_hat : 1d array-like
Array of predicted labels
"""
if self.verbose:
logger("test", "Predicting labels for %d samples" % X.shape[0])
return np.array([self.predict_label(sample) for sample in X])
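# --- Hypothetical usage sketch (not part of the original library) -----------
# Minimal regression example using the constructor defined above (selector
# defaults to 'pearson'). Data set and settings are illustrative only.
def _demo_citree_regressor():
    rng = np.random.RandomState(1)
    X = rng.uniform(-1, 1, size=(200, 4))
    y = 3.0 * X[:, 0] - 2.0 * X[:, 2] + rng.normal(scale=0.1, size=200)
    reg = CITreeRegressor(selector='pearson', n_permutations=50, random_state=1)
    reg.fit(X, y)
    print("training MSE:", np.mean((reg.predict(X) - y) ** 2))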
#####################
"""ENSEMBLE MODELS"""
#####################
def stratify_sampled_idx(random_state, y, bayes):
"""Indices for stratified bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Stratified sampled indices for each class
"""
np.random.seed(random_state)
idx = []
for label in np.unique(y):
# Grab indices for class
tmp = np.where(y==label)[0]
# Bayesian bootstrapping if specified
p = bayes_boot_probs(n=len(tmp)) if bayes else None
idx.append(np.random.choice(tmp, size=len(tmp), replace=True, p=p))
return idx
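# --- Illustrative check (assumed to be run by the caller, not the library) --
# stratify_sampled_idx draws, per class, as many indices as that class has
# samples, so the bootstrap keeps the class proportions of y.
def _demo_stratified_bootstrap():
    y = np.array([0] * 6 + [1] * 3)
    idx_per_class = stratify_sampled_idx(random_state=42, y=y, bayes=False)
    print([len(i) for i in idx_per_class])       # expected [6, 3]
    print(y[np.concatenate(idx_per_class)])      # six 0s followed by three 1s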
def stratify_unsampled_idx(random_state, y, bayes):
"""Unsampled indices for stratified bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Stratified unsampled indices for each class
"""
np.random.seed(random_state)
sampled = stratify_sampled_idx(random_state, y, bayes)
idx = []
for i, label in enumerate(np.unique(y)):
idx.append(np.setdiff1d(np.where(y==label)[0], sampled[i]))
return idx
def balanced_sampled_idx(random_state, y, bayes, min_class_p):
"""Indices for balanced bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
min_class_p : float
Minimum proportion of class labels
Returns
-------
idx : list
Balanced sampled indices for each class
"""
np.random.seed(random_state)
idx, n = [], int(np.floor(min_class_p*len(y)))
for i, label in enumerate(np.unique(y)):
# Grab indices for class
tmp = np.where(y==label)[0]
# Bayesian bootstrapping if specified
p = bayes_boot_probs(n=len(tmp)) if bayes else None
idx.append(np.random.choice(tmp, size=n, replace=True, p=p))
return idx
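# --- Illustrative check (assumed to be run by the caller, not the library) --
# With an imbalanced y, every class is resampled to the same size
# n = floor(min_class_p * len(y)), which is what "balanced" refers to.
def _demo_balanced_bootstrap():
    y = np.array([0] * 8 + [1] * 2)
    idx_per_class = balanced_sampled_idx(random_state=3, y=y, bayes=False,
                                         min_class_p=0.2)
    print([len(i) for i in idx_per_class])       # expected [2, 2]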
def balanced_unsampled_idx(random_state, y, bayes, min_class_p):
"""Unsampled indices for balanced bootstrap sampling in classification
Parameters
----------
random_state : int
Sets seed for random number generator
y : 1d array-like
Array of labels
bayes : bool
If True, performs Bayesian bootstrap sampling
min_class_p : float
Minimum proportion of class labels
Returns
-------
idx : list
Balanced unsampled indices for each class
"""
np.random.seed(random_state)
sampled = balanced_sampled_idx(random_state, y, bayes, min_class_p)
idx = []
for i, label in enumerate(np.unique(y)):
idx.append(np.setdiff1d(np.where(y==label)[0], sampled[i]))
return idx
def normal_sampled_idx(random_state, n, bayes):
"""Indices for bootstrap sampling
Parameters
----------
random_state : int
Sets seed for random number generator
n : int
Sample size
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Sampled indices
"""
np.random.seed(random_state)
# Bayesian bootstrapping if specified
p = bayes_boot_probs(n=n) if bayes else None
return np.random.choice(np.arange(n, dtype=int), size=n, replace=True, p=p)
def normal_unsampled_idx(random_state, n, bayes):
"""Unsampled indices for bootstrap sampling
Parameters
----------
random_state : int
Sets seed for random number generator
n : int
Sample size
bayes : bool
If True, performs Bayesian bootstrap sampling
Returns
-------
idx : list
Unsampled indices
"""
sampled = normal_sampled_idx(random_state, n, bayes)
counts = np.bincount(sampled, minlength=n)
return np.arange(n, dtype=int)[counts==0]
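# --- Illustrative check (assumed to be run by the caller, not the library) --
# For the same random_state, the unsampled ("out-of-bag") indices are exactly
# the ones missing from the bootstrap sample, so the two sets partition
# range(n).
def _demo_oob_indices():
    n, seed = 20, 7
    in_bag = normal_sampled_idx(seed, n, bayes=False)
    oob = normal_unsampled_idx(seed, n, bayes=False)
    assert set(in_bag).isdisjoint(oob)
    assert set(in_bag) | set(oob) == set(range(n))
    print("unique in-bag:", np.unique(in_bag).size, "out-of-bag:", oob.size)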
def _parallel_fit_classifier(tree, X, y, n, tree_idx, n_estimators, bootstrap,
bayes, verbose, random_state, class_weight=None,
min_dist_p=None):
"""Utility function for building trees in parallel
Note: This function can't go locally in a class, because joblib complains
that it cannot pickle it when placed there
Parameters
----------
tree : CITreeClassifier
Instantiated conditional inference tree
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
tree_idx : int
Index of tree in forest
n_estimators : int
Number of total estimators
bootstrap : bool
Whether to perform bootstrap sampling
bayes : bool
If True, performs Bayesian bootstrap sampling
verbose : bool or int
Controls verbosity of training process
random_state : int
Sets seed for random number generator
class_weight : str
Type of sampling during bootstrap, None for regular bootstrapping,
'balanced' for balanced bootstrap sampling, and 'stratify' for
stratified bootstrap sampling
    min_dist_p : float
Minimum proportion of class labels
Returns
-------
tree : CITreeClassifier
Fitted conditional inference tree
"""
# Print status if conditions met
if verbose and n_estimators >= 10:
denom = n_estimators if verbose > 1 else 10
if (tree_idx+1) % int(n_estimators/denom) == 0:
logger("tree", "Building tree %d/%d" % (tree_idx+1, n_estimators))
# Bootstrap sample if specified
if bootstrap:
random_state = random_state*(tree_idx+1)
if class_weight == 'balanced':
idx = np.concatenate(
balanced_sampled_idx(random_state, y, bayes, min_dist_p)
)
elif class_weight == 'stratify':
idx = np.concatenate(
stratify_sampled_idx(random_state, y, bayes)
)
else:
idx = normal_sampled_idx(random_state, n, bayes)
# Note: We need to pass the classes in the case of the bootstrap
# because not all classes may be sampled and when it comes to prediction,
# the tree models learns a different number of classes across different
# bootstrap samples
tree.fit(X[idx], y[idx], np.unique(y))
else:
tree.fit(X, y)
return tree
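# --- Sketch of the expected parallel call (an assumption; the actual call in
# CIForestClassifier.fit may differ). It also illustrates why this helper must
# live at module level: joblib has to be able to pickle it.
def _demo_parallel_fit(X, y, estimators, bootstrap=True, bayes=False,
                       verbose=0, random_state=1):
    from joblib import Parallel, delayed
    n = X.shape[0]
    return Parallel(n_jobs=-1)(
        delayed(_parallel_fit_classifier)(
            tree, X, y, n, i, len(estimators), bootstrap, bayes,
            verbose, random_state, class_weight=None
        )
        for i, tree in enumerate(estimators)
    )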
def _parallel_fit_regressor(tree, X, y, n, tree_idx, n_estimators, bootstrap,
bayes, verbose, random_state):
"""Utility function for building trees in parallel
Note: This function can't go locally in a class, because joblib complains
that it cannot pickle it when placed there
Parameters
----------
tree : CITreeRegressor
Instantiated conditional inference tree
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
n : int
Number of samples
tree_idx : int
Index of tree in forest
n_estimators : int
Number of total estimators
bootstrap : bool
Whether to perform bootstrap sampling
bayes : bool
If True, performs Bayesian bootstrap sampling
verbose : bool or int
Controls verbosity of training process
random_state : int
Sets seed for random number generator
Returns
-------
tree : CITreeRegressor
Fitted conditional inference tree
"""
# Print status if conditions met
if verbose and n_estimators >= 10:
denom = n_estimators if verbose > 1 else 10
if (tree_idx+1) % int(n_estimators/denom) == 0:
logger("tree", "Building tree %d/%d" % (tree_idx+1, n_estimators))
# Bootstrap sample if specified
if bootstrap:
random_state = random_state*(tree_idx+1)
idx = normal_sampled_idx(random_state, n, bayes)
# Train
tree.fit(X[idx], y[idx])
else:
tree.fit(X, y)
return tree
def _accumulate_prediction(predict, X, out, lock):
"""Utility function to aggregate predictions in parallel
Parameters
----------
predict : function handle
Alias to prediction method of class
X : 2d array-like
Array of features
out : 1d or 2d array-like
Array of labels
lock : threading lock
A lock that controls worker access to data structures for aggregating
predictions
Returns
-------
None
"""
prediction = predict(X)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)): out[i] += prediction[i]
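# --- Illustrative sketch (an assumption, not part of the original library) --
# The intended aggregation pattern: each fitted tree adds its class
# probabilities into a shared array under a lock, and the caller averages.
def _demo_accumulate(trees, X):
    import threading
    out = [np.zeros((X.shape[0], trees[0].n_classes_))]
    lock = threading.Lock()
    for tree in trees:
        _accumulate_prediction(tree.predict_proba, X, out, lock)
    return out[0] / len(trees)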
class CIForestClassifier(BaseEstimator, ClassifierMixin):
"""Conditional forest classifier
Parameters
----------
min_samples_split : int
Minimum samples required for a split
alpha : float
Threshold value for selecting feature with permutation tests. Smaller
values correspond to shallower trees
selector : str
Variable selector for finding strongest association between a feature
and the label
    max_depth : int
        Maximum depth to grow tree
    n_estimators : int
        Number of trees in the forest
max_feats : str or int
Maximum feats to select at each split. String arguments include 'sqrt',
'log', and 'all'
n_permutations : int
Number of permutations during feature selection
early_stopping : bool
Whether to implement early stopping during feature selection. If True,
then as soon as the first permutation test returns a p-value less than
alpha, this feature will be chosen as the splitting variable
muting : bool
Whether to perform variable muting
verbose : bool or int
Controls verbosity of training and testing
bootstrap : bool
Whether to perform bootstrap sampling for each tree
bayes : bool
If True, performs Bayesian bootstrap sampling
class_weight : str
Type of sampling during bootstrap, None for regular bootstrapping,
'balanced' for balanced bootstrap sampling, and 'stratify' for
stratified bootstrap sampling
n_jobs : int
Number of jobs for permutation testing
random_state : int
Sets seed for random number generator
"""
def __init__(self, min_samples_split=2, alpha=.05, selector='mc', max_depth=-1,
n_estimators=100, max_feats='sqrt', n_permutations=100,
early_stopping=True, muting=True, verbose=0, bootstrap=True,
bayes=True, class_weight='balanced', n_jobs=-1, random_state=None):
# Error checking
if alpha <= 0 or alpha > 1:
raise ValueError("Alpha (%.2f) should be in (0, 1]" % alpha)
if selector not in ['mc', 'mi', 'hybrid']:
raise ValueError("%s not a valid selector, valid selectors are " \
"mc, mi, and hybrid")
if n_permutations < 0:
raise ValueError("n_permutations (%s) should be > 0" % \
str(n_permutations))
if not isinstance(max_feats, int) and max_feats not in ['sqrt', 'log', 'all', -1]:
raise ValueError("%s not a valid argument for max_feats" % \
str(max_feats))
if n_estimators < 0:
raise ValueError("n_estimators (%s) must be > 0" % \
str(n_estimators))
# Only for classifier model
if class_weight not in [None, 'balanced', 'stratify']:
raise ValueError("%s not a valid argument for class_weight" % \
str(class_weight))
# Placeholder variable for regression model (not applicable)
if class_weight is None: self.min_class_p = None
# Define attributes
self.alpha = float(alpha)
self.selector = selector
self.min_samples_split = max(1, min_samples_split)
self.n_permutations = int(n_permutations)
if max_depth == -1:
self.max_depth = max_depth
else:
self.max_depth = int(max(1, max_depth))
self.n_estimators = int(max(1, n_estimators))
self.max_feats = max_feats
self.bootstrap = bootstrap
self.early_stopping = early_stopping
self.muting = muting
self.n_jobs = n_jobs
self.verbose = verbose
self.class_weight = class_weight
self.bayes = bayes
if random_state is None:
self.random_state = np.random.randint(1, 9999)
else:
# TODO: ADD CHECK FOR CRAZY LARGE INTEGER?
self.random_state = int(random_state)
# Package params for calling CITreeClassifier
self.params = {
'alpha' : self.alpha,
'selector' : self.selector,
'min_samples_split' : self.min_samples_split,
'n_permutations' : self.n_permutations,
'max_feats' : self.max_feats,
'early_stopping' : self.early_stopping,
'muting' : self.muting,
'verbose' : 0,
'n_jobs' : 1,
'random_state' : None,
}
def fit(self, X, y):
"""Fit conditional forest classifier
Parameters
----------
X : 2d array-like
Array of features
y : 1d array-like
Array of labels
Returns
-------
self : CIForestClassifier
Instance of CIForestClassifier
"""
self.labels_ = np.unique(y)
self.n_classes_ = len(self.labels_)
if self.verbose:
logger("tree", "Training ensemble with %d trees on %d samples" % \
(self.n_estimators, X.shape[0]))
# Instantiate base tree models
self.estimators_ = []
for i in range(self.n_estimators):
self.params['random_state'] = self.random_state*(i+1)
self.estimators_.append(CITreeClassifier(**self.params))
# Define class distribution
self.class_dist_p = np.array([
            np.mean(y == label) for label in self.labels_
        ])
###########################################################################
# #
# physical_validation, #
# a python package to test the physical validity of MD results #
# #
# Written by <NAME> <<EMAIL>> #
# <NAME> <<EMAIL>> #
# #
# Copyright (c) 2017-2021 University of Colorado Boulder #
# (c) 2012 The University of Virginia #
# #
###########################################################################
r"""
Data structures carrying simulation data.
"""
from typing import Any, List, Optional, Tuple
import numpy as np
from ..util import error as pv_error
from ..util.util import array_equal_shape_and_close
class RectangularBox:
def __init__(self, box: np.ndarray):
self.__box = None
self.__nframes = 0
assert 0 < box.ndim < 3
if box.ndim == 1:
assert box.size == 3
self.__box = box
self.__nframes = 1
elif box.ndim == 2:
assert box.shape[1] == 3
self.__box = box
self.__nframes = box.shape[0]
@property
def box(self):
return self.__box
def gather(
self, positions: np.ndarray, bonds: List[List[int]], molec_idx: List[int]
):
bonds = np.array(bonds)
if bonds.size == 0:
return positions
positions = np.array(positions)
assert 1 < positions.ndim < 4
if positions.ndim == 2:
nframes = 1
positions = np.array([positions])
else:
nframes = positions.shape[0]
if self.__nframes != 1:
assert self.__nframes == nframes
for f in range(nframes):
p = positions[f]
if self.__nframes > 1:
box = self.__box[f]
else:
box = self.__box[0]
assert len(bonds) == len(molec_idx)
for mbonds, idx in zip(bonds, molec_idx):
for b in mbonds:
a1 = idx + b[0]
a2 = idx + b[1]
p[a2] += np.round((p[a1] - p[a2]) / box) * box
positions[f] = p
return positions
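# --- Illustrative example (an assumption: run by a user of the package) -----
# One diatomic molecule whose second atom was wrapped to the far side of a
# cubic box; gather() applies the minimum-image shift along the bond so both
# atoms end up next to each other again.
def _demo_rectangular_box_gather():
    box = RectangularBox(np.array([10.0, 10.0, 10.0]))
    positions = np.array([[[0.5, 5.0, 5.0],     # atom 0
                           [9.5, 5.0, 5.0]]])   # atom 1, periodic image
    bonds = [[[0, 1]]]      # one molecule with a single bond between atoms 0-1
    molec_idx = [0]         # index of the first atom of that molecule
    gathered = box.gather(positions, bonds, molec_idx)
    print(gathered[0][1])   # expected close to [-0.5, 5.0, 5.0]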
class TrajectoryData(object):
r"""TrajectoryData: The position and velocity trajectory along the simulation
The full trajectory is needed to calculate the equipartition of the kinetic energy.
As they are used in connection, the position and velocity trajectories are expected
to have the same shape and number of frames.
The position and velocity trajectories can be accessed either using the getters
of an object, as in
* trajectory.position
* trajectory.velocity
or using the key notation, as in
* trajectory['position']
* trajectory['velocity']
"""
@staticmethod
def trajectories() -> Tuple[str, str]:
return "position", "velocity"
def __init__(self, position: Optional[Any] = None, velocity: Optional[Any] = None):
self.__position = None
self.__velocity = None
self.__nframes = None
self.__natoms = None
self.__getters = {
"position": TrajectoryData.position.__get__,
"velocity": TrajectoryData.velocity.__get__,
}
self.__setters = {
"position": TrajectoryData.position.__set__,
"velocity": TrajectoryData.velocity.__set__,
}
# Consistency check
assert set(self.__getters.keys()) == set(self.__setters.keys())
assert set(self.__getters.keys()) == set(TrajectoryData.trajectories())
if position is not None:
self.position = position
if velocity is not None:
self.velocity = velocity
def __getitem__(self, key: str) -> Optional[np.ndarray]:
if key not in self.trajectories():
raise KeyError
return self.__getters[key](self)
def __setitem__(self, key: str, value: Any) -> None:
if key not in self.trajectories():
raise KeyError
self.__setters[key](self, value)
def __check_value(self, value: Any, key: str) -> np.ndarray:
value = np.array(value)
if value.ndim == 2:
# create 3-dimensional array
            value = np.array([value])
from ast import Return
import webbrowser
import tkinter as tk
from tkinter import BooleanVar, Toplevel, ttk
from tkinter.constants import BOTH
from PIL import Image, ImageTk
from PIL import ImageGrab
import CalcFunctions as Calc
import json
from SideMenu import SideMenu
from tkvideo import tkvideo
from PlotFunctions import plot, plot_principal_axes
import shape_builder
from SettingsWindow import settings_window
from ErrorWindow import error_window
from UpdateWindow import update_window
from tkinter.filedialog import asksaveasfile
from tkinter import messagebox
import datetime as dt
from fpdf import FPDF
import numpy as np
import os
from urllib.request import urlopen
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.patches as patches
## ANIMATION WINDOW -----------------------------------------------------------------------------------------------------------------------------------------------------------
class starting_window(tk.Tk):
def __init__(self):
super().__init__()
self.overrideredirect(1)
# Position the window in the center of the page.
positionRight = int(self.winfo_screenwidth()/2 - 240)
positionDown = int(self.winfo_screenheight()/2 - 120)
self.geometry("+{}+{}".format(positionRight, positionDown))
# Play splash screen on tkinter widget
self.splash_image = Image.open("AMSZ_splash.png")
self.splash_image = self.splash_image.resize((480,240), Image.ANTIALIAS)
self.splash_img = ImageTk.PhotoImage(self.splash_image)
my_label = tk.Label(self, image = self.splash_img)
my_label.pack()
self.after(1500, lambda: self.destroy())
## MAIN WINDOW -----------------------------------------------------------------------------------------------------------------------------------------------------------
class main_window(tk.Tk):
# def onExit(self):
# self.quit()
def __init__(self):
super().__init__()
# main window opening size
self.win_width = 1301
self.win_height = 750
# Current version
self.version = 1.1
# screen size
self.screen_width = self.winfo_screenwidth()
self.screen_height = self.winfo_screenheight()
# boolean to decide if the window can fit to the screen
self.size_ok = tk.BooleanVar(False)
if self.win_width<self.screen_width/4*3 or self.win_height<self.screen_height/4*3:
self.size_ok.set(True)
# Position the window in the center of the page.
positionRight = int(self.winfo_screenwidth()/2 - self.win_width/2)
positionDown = int(self.winfo_screenheight()/2 - self.win_height/2)
self.geometry("+{}+{}".format(positionRight, positionDown))
# Variables
self.coordinate_on = tk.BooleanVar(False)
self.dimension_lines_on = tk.BooleanVar(False)
self.transformed_coordinate_on = tk.BooleanVar(False)
self.thickness_on = tk.BooleanVar(False)
self.coordinate_on.set(True)
self.dimension_lines_on.set(True)
self.plotted = tk.BooleanVar(False)
self.shape_builder_mode = False
self.window_open = BooleanVar(False)
#self.valid_sol = BooleanVar(False)
# Default unit, default theme
self.unit = settings["default_unit"]#"mm"
self.angle_unit = settings["angle_unit"] #! to settings
self.theme = settings["theme"]#"dark"
self.logo_enabled = settings["logo_enabled"]
#shape builder configuration
self.show_orig_axis = True
self.show_orig_axis_bool = tk.BooleanVar()
self.show_orig_axis_bool.set(self.show_orig_axis)
self.orig_axis_dissapier = False
self.orig_axis_dissapier_bool = tk.BooleanVar()
self.orig_axis_dissapier_bool.set(self.orig_axis_dissapier)
self.sb_ha_vis = True #visualizing hauptachsen in sb mode
self.sb_ha_vis_bool = tk.BooleanVar()
self.sb_ha_vis_bool.set(self.sb_ha_vis)
self.calc_for_orig_axis = False
self.calc_for_orig_axis_bool = tk.BooleanVar()
self.calc_for_orig_axis_bool.set(self.calc_for_orig_axis)
# Play AMSZ logo on startup
self.play_logo = tk.BooleanVar(False)
if self.logo_enabled == 'True':
self.play_logo.set(True)
else:
self.play_logo.set(False)
# Colors
if self.theme == "dark":
self.colors = DARK_THEME
else:
self.colors = LIGHT_THEME
## Window -------------------------------------------------------------------------------------------------------------------------------------------------------------------
self.title(f"Aream {self.version}")
if self.size_ok.get() == False:
self.state("zoomed") # Fullscreen
self.geometry(f"{self.win_width}x{self.win_height}")
self.configure(bg=self.colors['main_color'])
self.minsize(width=200, height=200)
self.tk.call('wm', 'iconphoto', self._w, tk.PhotoImage(file='logo_A.png'))
# self.iconbitmap("AMSZ.ico")
self.menu_is_on = False
self.create_menubar(self.shape_builder_mode, self.menu_is_on)
# Canvas for drawing
self.canvas = None
# Side Menu
self.sm = SideMenu(self)
self.sm.pack(side=tk.LEFT, padx = (20,10), pady = 20, fill=tk.Y)
# self.sm.pack(side=tk.LEFT, fill=tk.Y)
# angle_unit on pressing enter
self.bind('<Return>', self.calculate)
plot(self, None, False, False, False, False, self.colors, self.angle_unit)
# Checking for updates
url = "https://www.mm.bme.hu/amsz/index.php/python-masodrendu-nyomatek-szamito-felulet/"
page = urlopen(url)
html_bytes = page.read()
html = html_bytes.decode("utf-8")
s = str(html)
index = s.find("Legújabb verzió: ")
index_after = s.find("<br>", index, index+50)
index_version = index + 17
latest_version = s[index_version:index_after]
latest_version = float(latest_version)
if latest_version != self.version:
update_window(self)
## USEFUL FUNCTIONS -----------------------------------------------------------------------------------------------------------------------------------------------------------
def feedback(self):
webbrowser.open("https://forms.gle/gMP69MTgbtey9T5V8")
def help(self):
webbrowser.open("https://www.mm.bme.hu/amsz/index.php/python-masodrendu-nyomatek-szamito-felulet/")
def create_menubar(self, shape_builder_mode, menu_is_on):
if menu_is_on == True:
self.menu_canvas.pack_forget()
else:
self.menu_is_on = True
## custom menubar -----------------------------------------------------------------------------------------------------------------------------------------------------------
self.menu_canvas = tk.Canvas(self, bg=self.colors['secondary_color'], highlightthickness=0, height=26)
self.menu_canvas.pack(fill = tk.X)
# custom menubar objects
self.sol_save_button_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/sol_save.png")
self.sol_save_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/sol_save_hover.png")
self.sol_save_button = self.menu_canvas.create_image(0,0,anchor=tk.NW,image=self.sol_save_button_img)
self.menu_canvas.tag_bind(self.sol_save_button, '<Button-1>', lambda e: self.save_file())
self.menu_canvas.tag_bind(self.sol_save_button, '<Enter>', lambda e:self.menu_canvas.itemconfig(self.sol_save_button,
image=self.sol_save_button_hover_img))
self.menu_canvas.tag_bind(self.sol_save_button, '<Leave>', lambda e:self.menu_canvas.itemconfig(self.sol_save_button,
image=self.sol_save_button_img))
self.setting_button_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/settings.png")
self.setting_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/settings_hover.png")
self.setting_button = self.menu_canvas.create_image(167,0,anchor=tk.NW,image=self.setting_button_img)
self.menu_canvas.tag_bind(self.setting_button, '<Button-1>', lambda e: settings_window(self))
self.menu_canvas.tag_bind(self.setting_button, '<Enter>', lambda e:self.menu_canvas.itemconfig(self.setting_button,
image=self.setting_button_hover_img))
self.menu_canvas.tag_bind(self.setting_button, '<Leave>', lambda e:self.menu_canvas.itemconfig(self.setting_button,
image=self.setting_button_img))
self.basic_button_img = tk.PhotoImage(file=f"{self.colors['path']}/menubar/basic.png")
self.basic_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}/menubar/basic_hover.png")
self.change_button_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/change.png")
self.change_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/change_hover.png")
self.change_button = self.menu_canvas.create_image(261,0,anchor=tk.NW,image=self.change_button_img)
self.menu_canvas.tag_bind(self.change_button, '<Button-1>', lambda e: self.build_shape())
self.menu_canvas.tag_bind(self.change_button, '<Enter>', lambda e:self.menu_canvas.itemconfig(self.change_button,
image=self.change_button_hover_img))
self.menu_canvas.tag_bind(self.change_button, '<Leave>', lambda e:self.menu_canvas.itemconfig(self.change_button,
image=self.change_button_img))
if shape_builder_mode == False:
forms_posx = 167 + 94 + 104
help_posx = 167 + 94 + 104 + 97
else:
forms_posx = 167 + 94 + 118
help_posx = 167 + 94 + 118 + 97
self.forms_button_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/forms.png")
self.forms_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/forms_hover.png")
self.forms_button = self.menu_canvas.create_image(forms_posx,0,anchor=tk.NW,image=self.forms_button_img)
self.menu_canvas.tag_bind(self.forms_button, '<Button-1>', lambda e: self.feedback())
self.menu_canvas.tag_bind(self.forms_button, '<Enter>', lambda e:self.menu_canvas.itemconfig(self.forms_button,
image=self.forms_button_hover_img))
self.menu_canvas.tag_bind(self.forms_button, '<Leave>', lambda e:self.menu_canvas.itemconfig(self.forms_button,
image=self.forms_button_img))
self.help_button_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/help.png")
self.help_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/help_hover.png")
self.help_button = self.menu_canvas.create_image(help_posx,0,anchor=tk.NW,image=self.help_button_img)
self.menu_canvas.tag_bind(self.help_button, '<Button-1>', lambda e: self.help())
self.menu_canvas.tag_bind(self.help_button, '<Enter>', lambda e:self.menu_canvas.itemconfig(self.help_button,
image=self.help_button_hover_img))
self.menu_canvas.tag_bind(self.help_button, '<Leave>', lambda e:self.menu_canvas.itemconfig(self.help_button,
image=self.help_button_img))
def theme_change(self, theme):
if self.theme != theme:
self.theme=theme
if self.theme=="dark":
self.colors=DARK_THEME
self.sm.change_color(DARK_THEME)
# if self.plotted==True:
# plot(self, self.sm.shape, self.coordinate_on.get(), self.dimension_lines_on.get(), self.transformed_coordinate_on.get(), self.thickness_on.get(), self.colors)
elif self.theme == "light":
self.colors=LIGHT_THEME
self.sm.change_color(LIGHT_THEME)
# if self.plotted==True:
# plot(self, self.sm.shape, self.coordinate_on.get(), self.dimension_lines_on.get(), self.transformed_coordinate_on.get(), self.thickness_on.get(), self.colors)
else:
print("ERROR: Unknown Theme")
return -1
self.configure(bg=self.colors['main_color'])
settings['theme']=self.theme
self.destroy()
self.__init__()
#TODO: canvas color???? + plot
print(f"Theme set to {theme}")
def unit_change(self, unit_type, unit):
if unit_type == "degree":
self.angle_unit = unit
else:
self.unit = unit
for i in self.sm.controls:
if i["unit_type"] == unit_type:
i["unit"].config(text = unit)
try:
for i in self.sb.controls:
if i["unit_type"] == unit_type:
i["unit"].config(text = unit)
except:
None
def build_shape(self):
if not self.shape_builder_mode:
print("opening sb")
self.shape_builder_mode = True
self.create_menubar(self.shape_builder_mode, self.menu_is_on)
self.sm.pack_forget()
self.sb_sm = shape_builder.sb_side_menu(self)
self.sb_sm.pack(side=tk.LEFT, fill=tk.Y, padx = (20,10), pady = 20)
self.sb = shape_builder.shapeBuilder(self, self.sb_sm)
self.change_button_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/basic.png")
self.change_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/basic_hover.png")
self.menu_canvas.itemconfig (self.change_button, image=self.change_button_img)
if self.plotted==True:
self.canvas._tkcanvas.destroy()
self.sb.pack(expand=tk.YES, fill=tk.BOTH, padx = (10,20), pady = 20)
else:
print("closing sb")
self.sb.pack_forget()
self.sb_sm.pack_forget()
self.shape_builder_mode = False
self.create_menubar(self.shape_builder_mode, self.menu_is_on)
self.sm.pack(side=tk.LEFT, fill=tk.Y, padx = (20,10), pady = 20)
# calling = eval(f'self.sm.{self.sm.shape.lower()}_click')
# calling()
self.sm.combo_clear()
# self.combo_rectangle.grid(row=1, column=0, columnspan=5)
self.sm.combo_default_img = tk.PhotoImage(file=f"{self.colors['path']}combobox/combo_default.png")
self.sm.combo_default = tk.Label(self.sm.canvas, image=self.sm.combo_default_img, bg=self["background"], activebackground=self["background"])
self.sm.combo_default.bind('<Button-1>', func=lambda e:self.sm.combo_click())
self.sm.combo_default.grid(row=1, column=0, columnspan=5)
self.sm.combo_default["border"] = "0"
self.sm.clear()
# self.sm.combo_clear()
# self.sm.combo_rectangle.grid_forget() ## TODO eval func stringet códdá alakít
# self.sm.combo_default.grid(row=1, column=0, columnspan=5)
# self.sm.calling
self.plotted = False
self.sm.shape = None
plot(self, None, False, False, False, False, self.colors, self.angle_unit)
self.change_button_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/change.png")
self.change_button_hover_img = tk.PhotoImage(file=f"{self.colors['path']}menubar/change_hover.png")
self.menu_canvas.itemconfig (self.change_button, image=self.change_button_img)
def choose_object(self, shape = None):
self.dimensions = {
"a": 2,
"b": 1,
"d": 1
}
if shape == self.sm.shape:
return 0
if self.canvas is not None:
self.sm.clear()
if shape == "Rectangle" and self.sm.shape != "Rectangle":
self.sm.shape = "Rectangle"
self.sm.change_to_recrangle()
elif shape == "Circle" and self.sm.shape != "Circle":
self.sm.shape = "Circle"
self.sm.change_to_circle()
elif shape == "Ellipse" and self.sm.shape != "Ellipse":
self.sm.shape = "Ellipse"
self.sm.change_to_ellipse()
elif shape == "Isosceles_triangle" and self.sm.shape != "Isosceles_triangle":
self.sm.shape = "Isosceles_triangle"
self.sm.change_to_isosceles_triangle()
print(self.sm.shape)
elif shape == "Right_triangle" and self.sm.shape != "Right_triangle":
self.sm.shape = "Right_triangle"
self.sm.change_to_right_triangle()
print(self.sm.shape)
else:
self.sm.shape = None
print("Ez az alakzat még nincs definiálva...")
plot(self, self.sm.shape, self.coordinate_on.get(), self.dimension_lines_on.get(), self.transformed_coordinate_on.get(), self.thickness_on.get(), self.colors, self.angle_unit)
def get_entry(self, number_of_entries):
vissza = []
for i in range(number_of_entries):
if i >= 1 and self.sm.shape == "Circle": #! Jujj de csúnya...
i+=1
if self.sm.controls[i]["entry"].get().replace(',','.') == "":
print("Hiba")
self.sm.controls[i]["entry"].config({"background": self.colors['error_color']})
for i in self.sm.indicators:
i.config(text="")
self.sm.result1.config(text="Hiba a bemeneti adatokban!")
vissza.append(None)
elif float(self.sm.controls[i]["entry"].get().replace(',','.')) > 0:
vissza.append(float(self.sm.controls[i]["entry"].get().replace(',','.')))
self.sm.controls[i]["entry"].config({"background": self.colors['entry_color']})
else:
print("Hiba")
self.sm.controls[i]["entry"].config({"background": self.colors['error_color']})
for i in self.sm.indicators:
i.config(text="")
self.sm.result1.config(text="Hiba a bemeneti adatokban!")
vissza.append(None)
if self.transformed_coordinate_on.get():
for i in range(1,4):
if self.sm.shape == "Circle":
i += 1
if self.sm.controls[i]["entry"].get().replace(',','.') == "":
print("Hiba")
self.sm.controls[i]["entry"].config({"background": self.colors['error_color']})
for i in self.sm.indicators:
i.config(text="")
self.sm.result1.config(text="Hiba a bemeneti adatokban!")
vissza.append(None)
else:
vissza.append(float(self.sm.controls[i]["entry"].get().replace(',','.')))
self.sm.controls[i]["entry"].config({"background": self.colors['entry_color']})
if self.sm.shape != "Circle":
i += 1
if self.sm.controls[i]["entry"].get().replace(',','.') == "":
print("Hiba")
self.sm.controls[i]["entry"].config({"background": self.colors['error_color']})
for i in self.sm.indicators:
i.config(text="")
self.sm.result1.config(text="Hiba a bemeneti adatokban!")
vissza.append(None)
else:
vissza.append(float(self.sm.controls[i]["entry"].get().replace(',','.')))
self.sm.controls[i]["entry"].config({"background": self.colors['entry_color']})
if self.thickness_on.get():
if self.sm.shape == "Circle":
print("Kor szamitas")
t = float(self.sm.controls[-1]["entry"].get().replace(',','.'))
d = float(self.sm.controls[0]["entry"].get().replace(',','.'))
                if 0 < t < d/2:
print("kor lehetseges")
t = float(self.sm.controls[-1]["entry"].get().replace(',','.'))
self.sm.controls[-1]["entry"].config({"background": self.colors['entry_color']})
else:
print("Hiba")
self.sm.controls[-1]["entry"].config({"background": self.colors['error_color']})
for i in self.sm.indicators:
i.config(text="")
self.sm.result2.config(text="Hiba a falvastagságban!")
vissza.append(None)
t = None
elif self.sm.shape == "Right_triangle":
print("Derekszogu haromszog szamitas")
t = float(self.sm.controls[-1]["entry"].get().replace(',','.'))
a = float(self.sm.controls[0]["entry"].get().replace(',','.'))
b = float(self.sm.controls[1]["entry"].get().replace(',','.'))
phi = np.arctan(b/a)
c = np.sqrt(a**2 + b**2)
print('phi: ' + str(phi*180/np.pi))
print('c: ' + str(c))
s1 = np.sqrt(b**2 + (a/2)**2)
s2 = np.sqrt(a**2 + (c/2)**2 - 2*a*(c/2)*np.cos(phi))
s3 = np.sqrt(a**2 + (b/2)**2)
print('s2: ' + str(s2))
print('s3: ' + str(s3))
t1 = a/3
t2 = b/3
beta = np.arccos( ( (s2/3)**2 - (2*s3/3)**2 - (c/2)**2 ) / ( -2 * (2*s3/3) * (c/2) ) )
print('beta: ' + str(beta))
t3 = (2*s3/3)*np.sin(beta)
print('t1: ' + str(t1))
print('t2: ' + str(t2))
print('t3: ' + str(t3))
# selecting the smallest
t_min = min(t1, t2, t3)
print('legkisebb: ' + str(t_min))
if 0 < t:
try:
t = float(self.sm.controls[-1]["entry"].get().replace(',','.'))
self.sm.controls[-1]["entry"].config({"background": self.colors['entry_color']})
except:
print("Hiba")
self.sm.controls[-1]["entry"].config({"background": self.colors['error_color']})
for i in self.sm.indicators:
i.config(text="")
self.sm.result1.config(text="Hiba a bemeneti adatokban!")
self.sm.result2.config(text="Hiba a falvastagságban!")
vissza.append(None)
t = None
if 0 < t >= t_min:
self.sm.controls[0]["entry"].config({"background": self.colors['error_color']})
self.sm.controls[1]["entry"].config({"background": self.colors['error_color']})
self.sm.controls[-1]["entry"].config({"background": self.colors['error_color']})
for i in self.sm.indicators:
i.config(text="")
self.sm.result1.config(text="Hiba a bemeneti adatokban!")
self.sm.result2.config(text="Hiba a falvastagságban!")
vissza.append(None)
elif self.sm.shape == "Isosceles_triangle":
print("<NAME>")
t = float(self.sm.controls[-1]["entry"].get().replace(',','.'))
a = float(self.sm.controls[0]["entry"].get().replace(',','.'))
b = float(self.sm.controls[1]["entry"].get().replace(',','.'))
phi = np.arctan(b/ (a/2))
c = np.sqrt((a/2)**2 + b**2)
print('phi: ' + str(phi*180/np.pi))
print('c: ' + str(c))
s1 = b
s2 = np.sqrt(a**2 + (c/2)**2 - 2*a*(c/2)*np.cos(phi))
s3 = s2
print('s2: ' + str(s2))
print('s3: ' + str(s3))
t1 = a/3
beta = np.arccos( ( (s2/3)**2 - (2*s3/3)**2 - (c/2)**2 ) / ( -2 * (2*s3/3) * (c/2) ) )
print('beta: ' + str(beta))
                t2 = (2*s2/3)*np.sin(beta)
'''
Cross-Validation Data Classes
=============================
Scikit-learn compatible classes for performing various
types of cross-validation
'''
__all__ = ['KFoldSubject','KFoldStratified','LeaveOneSubjectOut','set_cv']
__author__ = ["<NAME>"]
__license__ = "MIT"
from sklearn.cross_validation import _BaseKFold
import numpy as np
import random
import pandas as pd
class KFoldSubject(_BaseKFold):
"""K-Folds cross validation iterator which holds out same subjects.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds while ensuring that same subject is held
out within each fold
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
    Extension of KFold from scikit-learn's cross_validation module
Args:
n: int
Total number of elements.
        labels: vector of length n indicating subject IDs
n_folds: int, default=3
Number of folds. Must be at least 2.
shuffle: boolean, optional
Whether to shuffle the data before splitting into batches.
random_state: None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
"""
def __init__(self, n, labels, n_folds=3, shuffle=False, random_state=None):
super(KFoldSubject, self).__init__(n, n_folds, shuffle, random_state)
        self.idxs = np.arange(n)
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
import numbers
import typing
from functools import singledispatch
import numpy as np
from .builtin import (
AssociativeOp,
add,
atanh,
exp,
log,
log1p,
max,
min,
reciprocal,
safediv,
safesub,
sqrt,
tanh,
)
from .op import (
DISTRIBUTIVE_OPS,
UNITS,
BinaryOp,
FinitaryOp,
Op,
OpMeta,
ReductionOp,
TernaryOp,
UnaryOp,
declare_op_types,
)
_builtin_all = all
_builtin_any = any
# This is used only for pattern matching.
array = (np.ndarray, np.generic)
arraylist = typing.Tuple[typing.Union[array], ...]
sqrt.register(array)(np.sqrt)
exp.register(array)(np.exp)
log1p.register(array)(np.log1p)
tanh.register(array)(np.tanh)
atanh.register(array)(np.arctanh)
###########################################
# Reduction Ops
###########################################
@ReductionOp.make
def all(x, axis=None, keepdims=False):
return np.all(x, axis, keepdims=keepdims)
@ReductionOp.make
def any(x, axis=None, keepdims=False):
return np.any(x, axis, keepdims=keepdims)
@ReductionOp.make
def amax(x, axis=None, keepdims=False):
return np.amax(x, axis, keepdims=keepdims)
@ReductionOp.make
def amin(x, axis=None, keepdims=False):
return np.amin(x, axis, keepdims=keepdims)
@ReductionOp.make
def sum(x, axis=None, keepdims=False):
return np.sum(x, axis, keepdims=keepdims)
@ReductionOp.make
def prod(x, axis=None, keepdims=False):
return np.prod(x, axis, keepdims=keepdims)
@ReductionOp.make
def logsumexp(x, axis=None, keepdims=False):
amax = np.amax(x, axis=axis, keepdims=True)
# treat the case x = -inf
amax = np.where(np.isfinite(amax), amax, 0.0)
unnormalized_lse = log(np.sum(np.exp(x - amax), axis, keepdims=keepdims))
amax = amax if keepdims else amax.squeeze(axis)
return unnormalized_lse + amax
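# --- Illustrative check (an assumption: run by a user, not part of the ops) -
# The max-shift used in logsumexp above is the standard stability trick: for
# large inputs the naive log(sum(exp(x))) overflows, while the shifted form
# stays finite.
def _demo_logsumexp_shift():
    x = np.array([1000.0, 1000.0])                   # exp(1000) overflows to inf
    amax = np.amax(x)
    stable = amax + np.log(np.sum(np.exp(x - amax)))
    print(stable)                                    # 1000 + log(2)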
@ReductionOp.make
def mean(x, axis=None, keepdims=False):
return np.mean(x, axis, keepdims=keepdims)
@ReductionOp.make
def std(x, axis=None, ddof=0, keepdims=False):
return np.std(x, axis, ddof=ddof, keepdims=keepdims)
@ReductionOp.make
def var(x, axis=None, ddof=0, keepdims=False):
return np.var(x, axis, ddof=ddof, keepdims=keepdims)
###########################################
@UnaryOp.make
def argmax(x, axis=None, keepdims=False):
if keepdims:
return np.expand_dims(np.argmax(x, axis), axis)
return np.argmax(x, axis)
@UnaryOp.make
def argmin(x, axis=None, keepdims=False):
if keepdims:
return np.expand_dims(np.argmin(x, axis), axis)
return np.argmin(x, axis)
@UnaryOp.make
def isnan(x):
    return np.isnan(x)
import random
import numpy as np
from sklearn.model_selection import StratifiedKFold
def run_neural_network(prepare_input_data, build_neural_network, evaluate_model):
"""
Performs cross validation for the clean classifier, using 5 splits.
:param prepare_input_data: callback to prepare input data
:param build_neural_network: callback to build the neural network
:param evaluate_model: callback to prepare and evaluate the model
:return:
"""
input_data = prepare_input_data()
random.shuffle(input_data)
images = [elem['data'] for elem in input_data]
labels = [elem['image_type'] for elem in input_data]
images = np.array(images)
labels = np.array(labels)
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=None)
cvscores = []
cvlosses = []
i = 0
for train, test in kfold.split(images, labels):
i += 1
print("cross validation: ", i)
model = build_neural_network()
val_loss, val_acc = evaluate_model(model, images[test], labels[test], images[train], labels[train])
print('Loss value ' + str(val_loss))
print('Accuracy ' + str(val_acc))
cvscores.append(val_acc * 100)
cvlosses.append(val_loss)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
print("%.2f (+/- %.2f)" % (np.mean(cvlosses), | np.std(cvlosses) | numpy.std |
import tensorflow as tf
from tensorflow.contrib import slim
from scipy import misc
import os, random
import numpy as np
from glob import glob
def prepare_data(dataset_name, size, gray_to_RGB=False):
input_list = sorted(glob('./dataset/{}/*.*'.format(dataset_name + '/trainA')))
target_list = sorted(glob('./dataset/{}/*.*'.format(dataset_name + '/trainB')))
trainA = []
trainB = []
if gray_to_RGB :
for image in input_list:
trainA.append(np.expand_dims(misc.imresize(misc.imread(image, mode='L'), [size, size]), axis=-1))
for image in input_list:
trainB.append(misc.imresize(misc.imread(image, mode='RGB'), [size, size]))
# trainA = np.repeat(trainA, repeats=3, axis=-1)
# trainA = np.array(trainA).astype(np.float32)[:, :, :, None]
else :
for image in input_list :
trainA.append(misc.imresize(misc.imread(image, mode='RGB'), [size, size]))
for image in target_list :
trainB.append(misc.imresize(misc.imread(image, mode='RGB'), [size, size]))
trainA = preprocessing(np.asarray(trainA))
trainB = preprocessing(np.asarray(trainB))
return trainA, trainB
def shuffle(x, y) :
    seed = np.random.random_integers(low=0, high=1000)
import logging
from time import sleep, time
import numpy as np
import pybullet as p
from transforms3d import euler
log = logging.getLogger(__name__)
from igibson.external.pybullet_tools.utils import (
control_joints,
get_base_values,
get_joint_positions,
get_max_limits,
get_min_limits,
get_sample_fn,
is_collision_free,
joints_from_names,
link_from_name,
plan_base_motion_2d,
plan_joint_motion,
set_base_values_with_z,
set_joint_positions,
)
from igibson.objects.visual_marker import VisualMarker
from igibson.scenes.gibson_indoor_scene import StaticIndoorScene
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.utils.utils import l2_distance, quatToXYZW, restoreState, rotate_vector_2d
class MotionPlanningWrapper(object):
"""
Motion planner wrapper that supports both base and arm motion
"""
def __init__(
self,
env=None,
base_mp_algo="birrt",
arm_mp_algo="birrt",
optimize_iter=0,
fine_motion_plan=True,
full_observability_2d_planning=False,
collision_with_pb_2d_planning=False,
visualize_2d_planning=False,
visualize_2d_result=False,
):
"""
Get planning related parameters.
"""
self.env = env
assert "occupancy_grid" in self.env.output
# get planning related parameters from env
body_ids = self.env.robots[0].get_body_ids()
assert len(body_ids) == 1, "Only single-body robots are supported."
self.robot_id = body_ids[0]
# Types of 2D planning
# full_observability_2d_planning=TRUE and collision_with_pb_2d_planning=TRUE -> We teleport the robot to locations and check for collisions
# full_observability_2d_planning=TRUE and collision_with_pb_2d_planning=FALSE -> We use the global occupancy map from the scene
# full_observability_2d_planning=FALSE and collision_with_pb_2d_planning=FALSE -> We use the occupancy_grid from the lidar sensor
# full_observability_2d_planning=FALSE and collision_with_pb_2d_planning=TRUE -> [not suported yet]
self.full_observability_2d_planning = full_observability_2d_planning
self.collision_with_pb_2d_planning = collision_with_pb_2d_planning
assert not ((not self.full_observability_2d_planning) and self.collision_with_pb_2d_planning)
self.robot_footprint_radius = self.env.sensors["scan_occ"].robot_footprint_radius
if self.full_observability_2d_planning:
# TODO: it may be better to unify and make that scene.floor_map uses OccupancyGridState values always
assert len(self.env.scene.floor_map) == 1 # We assume there is only one floor (not true for Gibson scenes)
self.map_2d = np.array(self.env.scene.floor_map[0])
self.map_2d = np.array((self.map_2d == 255)).astype(np.float32)
self.per_pixel_resolution = self.env.scene.trav_map_resolution
assert np.array(self.map_2d).shape[0] == np.array(self.map_2d).shape[1]
self.grid_resolution = self.map_2d.shape[0]
self.occupancy_range = self.grid_resolution * self.per_pixel_resolution
self.robot_footprint_radius_in_map = int(np.ceil(self.robot_footprint_radius / self.per_pixel_resolution))
else:
self.grid_resolution = self.env.grid_resolution
self.occupancy_range = self.env.sensors["scan_occ"].occupancy_range
self.robot_footprint_radius_in_map = self.env.sensors["scan_occ"].robot_footprint_radius_in_map
self.robot = self.env.robots[0]
self.base_mp_algo = base_mp_algo
self.arm_mp_algo = arm_mp_algo
# If we plan in the map, we do not need to check rotations: a location is in collision (or not) independently
# of the orientation. If we use pybullet, we may find some cases where the base orientation changes the
# collision value for the same location between True/False
if not self.collision_with_pb_2d_planning:
self.base_mp_resolutions = np.array([0.05, 0.05, 2 * np.pi])
else:
self.base_mp_resolutions = np.array([0.05, 0.05, 0.05])
self.optimize_iter = optimize_iter
self.mode = self.env.mode
self.initial_height = self.env.initial_pos_z_offset
self.fine_motion_plan = fine_motion_plan
self.robot_type = self.robot.model_name
if self.env.simulator.viewer is not None:
self.env.simulator.viewer.setup_motion_planner(self)
if self.robot_type in ["Fetch"]:
self.setup_arm_mp()
self.arm_interaction_length = 0.2
self.marker = None
self.marker_direction = None
if self.mode in ["gui_non_interactive", "gui_interactive"]:
self.marker = VisualMarker(radius=0.04, rgba_color=[0, 0, 1, 1])
self.marker_direction = VisualMarker(
visual_shape=p.GEOM_CAPSULE,
radius=0.01,
length=0.2,
initial_offset=[0, 0, -0.1],
rgba_color=[0, 0, 1, 1],
)
self.env.simulator.import_object(self.marker)
self.env.simulator.import_object(self.marker_direction)
self.visualize_2d_planning = visualize_2d_planning
self.visualize_2d_result = visualize_2d_result
def set_marker_position(self, pos):
"""
Set subgoal marker position
:param pos: position
"""
self.marker.set_position(pos)
def set_marker_position_yaw(self, pos, yaw):
"""
Set subgoal marker position and orientation
:param pos: position
:param yaw: yaw angle
"""
quat = quatToXYZW(seq="wxyz", orn=euler.euler2quat(0, -np.pi / 2, yaw))
self.marker.set_position(pos)
self.marker_direction.set_position_orientation(pos, quat)
def set_marker_position_direction(self, pos, direction):
"""
Set subgoal marker position and orientation
:param pos: position
:param direction: direction vector
"""
yaw = np.arctan2(direction[1], direction[0])
self.set_marker_position_yaw(pos, yaw)
def setup_arm_mp(self):
"""
Set up arm motion planner
"""
if self.robot_type == "Fetch":
self.arm_default_joint_positions = (
0.1,
-1.41,
1.517,
0.82,
2.2,
2.96,
-1.286,
0.0,
)
self.arm_joint_names = [
"torso_lift_joint",
"shoulder_pan_joint",
"shoulder_lift_joint",
"upperarm_roll_joint",
"elbow_flex_joint",
"forearm_roll_joint",
"wrist_flex_joint",
"wrist_roll_joint",
]
self.robot_joint_names = [
"r_wheel_joint",
"l_wheel_joint",
"torso_lift_joint",
"head_pan_joint",
"head_tilt_joint",
"shoulder_pan_joint",
"shoulder_lift_joint",
"upperarm_roll_joint",
"elbow_flex_joint",
"forearm_roll_joint",
"wrist_flex_joint",
"wrist_roll_joint",
"r_gripper_finger_joint",
"l_gripper_finger_joint",
]
self.arm_joint_ids = joints_from_names(
self.robot_id,
self.arm_joint_names,
)
self.robot_arm_indices = [
self.robot_joint_names.index(arm_joint_name) for arm_joint_name in self.arm_joint_names
]
self.arm_ik_threshold = 0.05
self.mp_obstacles = []
if type(self.env.scene) == StaticIndoorScene:
if self.env.scene.mesh_body_id is not None:
self.mp_obstacles.append(self.env.scene.mesh_body_id)
elif type(self.env.scene) == InteractiveIndoorScene:
self.mp_obstacles.extend(self.env.scene.get_body_ids())
# Since the refactoring, the robot is another object in the scene
# We need to remove it to not check twice for self collisions
self.mp_obstacles.remove(self.robot_id)
def plan_base_motion(self, goal):
"""
Plan base motion given a base subgoal
:param goal: base subgoal
:return: waypoints or None if no plan can be found
"""
if self.marker is not None:
self.set_marker_position_yaw([goal[0], goal[1], 0.05], goal[2])
log.debug("Motion planning base goal: {}".format(goal))
state = self.env.get_state()
x, y, theta = goal
map_2d = state["occupancy_grid"] if not self.full_observability_2d_planning else self.map_2d
if not self.full_observability_2d_planning:
yaw = self.robot.get_rpy()[2]
half_occupancy_range = self.occupancy_range / 2.0
robot_position_xy = self.robot.get_position()[:2]
corners = [
robot_position_xy + rotate_vector_2d(local_corner, -yaw)
for local_corner in [
                    np.array([half_occupancy_range, half_occupancy_range]),
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 15:24:44 2021
@author: thele
"""
from .mock_device import build_mock_device_with_json
from ..main_utils.utils import extract_volume_from_mock_device, extract_volume_from_gpr
import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import marching_cubes_lewiner
from ..Investigation.Investigation_factory import Investigation_stage
from ..main_utils.utils import Timer
def create_mock_device_from_file(configs, pm = 60.0):
device = build_mock_device_with_json(configs['playground'])
plunger_gates = configs['plunger_gates']
def jump(params,inv=False):
if inv:
return params
else:
return device.jump(params)
measure = device.measure
check = lambda: device.check(plunger_gates)
inv_timer = Timer()
investigation_stage = Investigation_stage(jump,measure,check,configs['investigation'],inv_timer)
def score(vol):
inv = investigation_stage.do_extra_measure(vol,0.0,1.0, score_thresh=0.001)['extra_measure']
if len(inv) >1:
scorev = inv[1][0]
else:
scorev = 0.0
uvec = vol/np.linalg.norm(vol)
device.jump(vol + (uvec*pm))
device_p_pinch = measure()
device.jump(vol - (uvec*pm))
device_m_pinch = measure()
device.jump(vol)
good = np.logical_xor(device_p_pinch, device_m_pinch)
return scorev*good
return device, jump, measure, check, score
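# --- Hypothetical usage sketch ----------------------------------------------
# Assumes a config dict with the keys used above ('playground', 'plunger_gates',
# 'investigation', ...); the gate-voltage values are illustrative only.
def _demo_mock_device(configs):
    device, jump, measure, check, score = create_mock_device_from_file(configs)
    test_voltages = np.array([-500.0, -500.0, -500.0])
    jump(test_voltages)
    print("measured current:", measure())
    print("score at this voltage:", score(test_voltages))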
def plot_device_demo(device, configs, cmap = 'winter'):
conf_g = configs['general']
dev_vol = extract_volume_from_mock_device(conf_g['lb_box'],conf_g['ub_box'],50,device)
verts, faces, points_surf = get_surface(dev_vol,conf_g['lb_box'],conf_g['ub_box'],conf_g['ub_box'], 50)
perm=[0,1,2]
preds = np.array([0.5]*points_surf.shape[0])
cmap = plt.get_cmap(cmap)
c_preds = cmap(preds).squeeze()
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_trisurf(verts[:, perm[0]], verts[:, perm[1]], faces, verts[:, perm[2]],
lw=0.1,edgecolor="black",alpha=0.5,vmin=0,vmax=1.)
surf.set_facecolor(c_preds.tolist())
ax.set_xlim([conf_g['ub_box'][0],conf_g['lb_box'][0]])
ax.set_ylim([conf_g['ub_box'][1],conf_g['lb_box'][1]])
ax.set_zlim([conf_g['ub_box'][2],conf_g['lb_box'][2]])
ax.set_ylabel("Gate 1 / mV")
ax.set_xlabel("Gate 2 / mV")
ax.set_zlabel("Gate 3 / mV")
plt.show()
def plot_gpr_demo(gpr, configs, origin = [0,0,0], obs = None, cmap = 'winter'):
conf_g = configs['general']
dev_vol = extract_volume_from_gpr(conf_g['lb_box'],conf_g['ub_box'],50,gpr)
verts, faces, points_surf = get_surface(dev_vol,conf_g['lb_box'],conf_g['ub_box'],conf_g['ub_box'], 50)
vol_origin = np.array(origin)
vol_origin[[0,1,2]]=vol_origin[[1,0,2]]
verts = (verts)+vol_origin
perm=[0,1,2]
if not isinstance(cmap, str):
preds = cmap[1](points_surf)
else:
preds = | np.array([0.5]*points_surf.shape[0]) | numpy.array |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Custom TensorFlow ops for efficient resampling of 2D images."""
import os
import numpy as np
import tensorflow as tf
from models.stylegan2.layers.cuda import custom_ops
def _get_plugin():
return custom_ops.get_plugin(os.path.splitext(__file__)[0] + ".cu")
# ----------------------------------------------------------------------------
def upfirdn_2d(
x,
k,
upx=1,
upy=1,
downx=1,
downy=1,
padx0=0,
padx1=0,
pady0=0,
pady1=0,
impl="cuda",
):
r"""Pad, upsample, FIR filter, and downsample a batch of 2D images.
Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`
and performs the following operations for each image, batched across
`majorDim` and `minorDim`:
1. Pad the image with zeros by the specified number of pixels on each side
(`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value
corresponds to cropping the image.
2. Upsample the image by inserting the zeros after each pixel (`upx`, `upy`).
3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the
image so that the footprint of all output pixels lies within the input image.
4. Downsample the image by throwing away pixels (`downx`, `downy`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[majorDim, inH, inW, minorDim]`.
k: 2D FIR filter of the shape `[firH, firW]`.
upx: Integer upsampling factor along the X-axis (default: 1).
upy: Integer upsampling factor along the Y-axis (default: 1).
downx: Integer downsampling factor along the X-axis (default: 1).
downy: Integer downsampling factor along the Y-axis (default: 1).
padx0: Number of pixels to pad on the left side (default: 0).
padx1: Number of pixels to pad on the right side (default: 0).
pady0: Number of pixels to pad on the top side (default: 0).
pady1: Number of pixels to pad on the bottom side (default: 0).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`.
"""
impl_dict = {
"ref": _upfirdn_2d_ref,
"cuda": _upfirdn_2d_cuda,
}
return impl_dict[impl](
x=x,
k=k,
upx=upx,
upy=upy,
downx=downx,
downy=downy,
padx0=padx0,
padx1=padx1,
pady0=pady0,
pady1=pady1,
)
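# Usage sketch (illustrative; not part of the original API docs). A 2x2 box FIR
# filter combined with 2x upsampling reproduces nearest-neighbour upsampling:
#
# x = tf.ones([1, 4, 4, 1])                # [majorDim, inH, inW, minorDim]
# k = np.ones([2, 2], dtype=np.float32)    # 2x2 box filter
# y = upfirdn_2d(x, k, upx=2, upy=2, padx0=1, padx1=0, pady0=1, pady1=0, impl="ref")
# # zeros are inserted between pixels and the box filter fills them in,
# # so y has shape [1, 8, 8, 1] with every entry equal to 1.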
# ----------------------------------------------------------------------------
def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
"""Slow reference implementation of `upfirdn_2d()` using standard TensorFlow ops."""
x = tf.convert_to_tensor(x)
k = | np.asarray(k, dtype=np.float32) | numpy.asarray |
# Copyright (c) 2020 <NAME> & <NAME>
# FEniCS Project
# SPDX-License-Identifier: MIT
import libtab
import numpy
import pytest
import sympy
from .test_lagrange import sympy_disc_lagrange
def sympy_nedelec(celltype, n):
x = sympy.Symbol("x")
y = sympy.Symbol("y")
z = sympy.Symbol("z")
from sympy import S
topology = libtab.topology(celltype)
geometry = S(libtab.geometry(celltype).astype(int))
dummy = [sympy.Symbol("DUMMY1"), sympy.Symbol("DUMMY2"), sympy.Symbol("DUMMY3")]
funcs = []
if celltype == libtab.CellType.triangle:
tdim = 2
for i in range(n):
for j in range(n - i):
for d in range(2):
funcs += [[x**j * y**i if k == d else 0 for k in range(2)]]
for i in range(n):
funcs += [[x ** (n - 1 - i) * y ** (i + 1),
-x ** (n - i) * y ** i]]
mat = numpy.empty((len(funcs), len(funcs)), dtype=object)
# edge tangents
if n == 1:
edge_basis = [sympy.Integer(1)]
else:
edge_basis = sympy_disc_lagrange(libtab.CellType.interval, n - 1)
edge_basis = [a.subs(x, dummy[0]) for a in edge_basis]
for i, f in enumerate(funcs):
j = 0
for edge in topology[1]:
edge_geom = [geometry[t, :] for t in edge]
tangent = edge_geom[1] - edge_geom[0]
norm = sympy.sqrt(sum(i ** 2 for i in tangent))
tangent = [i / norm for i in tangent]
param = [(1 - dummy[0]) * a + dummy[0] * b for a, b in zip(edge_geom[0], edge_geom[1])]
for g in edge_basis:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, tangent))
integrand = integrand.subs(x, param[0]).subs(y, param[1])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1))
j += 1
# interior dofs
if n > 1:
if n == 2:
face_basis = [sympy.Integer(1)]
else:
face_basis = sympy_disc_lagrange(libtab.CellType.triangle, n - 2)
for i, f in enumerate(funcs):
j = n * 3
for g in face_basis:
for vec in [(1, 0), (0, 1)]:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, vec)) * g
mat[i, j] = integrand.integrate((x, 0, 1 - y)).integrate((y, 0, 1))
j += 1
elif celltype == libtab.CellType.tetrahedron:
tdim = 3
for i in range(n):
for j in range(n - i):
for k in range(n - i - j):
for d in range(3):
funcs += [[x**k * y**j * z**i if m == d else 0 for m in range(3)]]
if n == 1:
funcs += [[y, -x, sympy.Integer(0)], [z, sympy.Integer(0), -x], [sympy.Integer(0), z, -y]]
elif n == 2:
funcs += [
[y ** 2, -x * y, sympy.Integer(0)],
[x * y, -x ** 2, sympy.Integer(0)],
[z * y, -z * x, sympy.Integer(0)],
[sympy.Integer(0), y * z, -y ** 2],
[sympy.Integer(0), z ** 2, -z * y],
[sympy.Integer(0), x * z, -x * y],
[x * z, sympy.Integer(0), -x ** 2],
[z ** 2, sympy.Integer(0), -z * x],
]
elif n == 3:
funcs += [
[x ** 2 * y, -x ** 3, sympy.Integer(0)],
[x ** 2 * z, sympy.Integer(0), -x ** 3],
[sympy.Integer(0), x ** 2 * z, -x ** 2 * y],
[x * y ** 2, -x ** 2 * y, sympy.Integer(0)],
[2 * x * y * z, -x ** 2 * z, -x ** 2 * y],
[sympy.Integer(0), x * y * z, -x * y ** 2],
[x * z ** 2, sympy.Integer(0), -x ** 2 * z],
[sympy.Integer(0), x * z ** 2, -x * y * z],
[y ** 3, -x * y ** 2, sympy.Integer(0)],
[9 * y ** 2 * z, -4 * x * y * z, -5 * x * y ** 2],
[sympy.Integer(0), y ** 2 * z, -y ** 3],
[9 * y * z ** 2, -5 * x * z ** 2, -4 * x * y * z],
[sympy.Integer(0), y * z ** 2, -y ** 2 * z],
[z ** 3, sympy.Integer(0), -x * z ** 2],
[sympy.Integer(0), z ** 3, -y * z ** 2],
]
else:
raise NotImplementedError
mat = numpy.empty((len(funcs), len(funcs)), dtype=object)
# edge tangents
if n == 1:
edge_basis = [sympy.Integer(1)]
else:
edge_basis = sympy_disc_lagrange(libtab.CellType.interval, n - 1)
edge_basis = [a.subs(x, dummy[0]) for a in edge_basis]
for i, f in enumerate(funcs):
j = 0
for edge in topology[1]:
edge_geom = [geometry[t, :] for t in edge]
tangent = edge_geom[1] - edge_geom[0]
norm = sympy.sqrt(sum(i ** 2 for i in tangent))
tangent = [i / norm for i in tangent]
param = [(1 - dummy[0]) * a + dummy[0] * b for a, b in zip(edge_geom[0], edge_geom[1])]
for g in edge_basis:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, tangent))
integrand = integrand.subs(x, param[0]).subs(y, param[1]).subs(z, param[2])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1))
j += 1
# face dofs
if n > 1:
if n == 2:
face_basis = [sympy.Integer(1)]
else:
face_basis = sympy_disc_lagrange(libtab.CellType.triangle, n - 2)
face_basis = [a.subs(x, dummy[0]).subs(y, dummy[1]) for a in face_basis]
for i, f in enumerate(funcs):
j = n * 6
for face in topology[2]:
face_geom = [geometry[t, :] for t in face]
axes = [face_geom[1] - face_geom[0], face_geom[2] - face_geom[0]]
norm = sympy.sqrt(sum(i**2 for i in
[axes[0][1] * axes[1][2] - axes[0][2] * axes[1][1],
axes[0][2] * axes[1][0] - axes[0][0] * axes[1][2],
axes[0][0] * axes[1][1] - axes[0][1] * axes[1][0]]))
scaled_axes = []
for a in axes:
axisnorm = sympy.sqrt(sum(k**2 for k in a))
scaled_axes.append([k / axisnorm for k in a])
param = [a + dummy[0] * b + dummy[1] * c for a, b, c in zip(face_geom[0], *axes)]
for g in face_basis:
for vec in scaled_axes:
integrand = sum(f_i * v_i for f_i, v_i in zip(f, vec))
integrand = integrand.subs(x, param[0]).subs(y, param[1]).subs(z, param[2])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1 - dummy[1])).integrate((dummy[1], 0, 1))
j += 1
# interior dofs
if n > 2:
if n == 3:
interior_basis = [sympy.Integer(1)]
else:
interior_basis = sympy_disc_lagrange(libtab.CellType.tetrahedron, n - 3)
for i, f in enumerate(funcs):
j = n * 6 + 4 * n * (n - 1)
for g in interior_basis:
for vec in [(1, 0, 0), (0, 1, 0), (0, 0, 1)]:
integrand = sum(f_i * v_i for f_i, v_i in zip(f, vec))
integrand *= g
mat[i, j] = integrand.integrate((x, 0, 1 - y - z)).integrate((y, 0, 1 - z)).integrate((z, 0, 1))
j += 1
mat = sympy.Matrix(mat)
mat = mat.inv()
g = []
for dim in range(tdim):
for r in range(mat.shape[0]):
g += [sum([v * funcs[i][dim] for i, v in enumerate(mat.row(r))])]
return g
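# The functionals assembled in `mat` above are the usual first-kind Nedelec degrees
# of freedom: tangential edge moments int_e (v . t) q ds with q of degree n-1,
# face moments int_f (v . a_i) q dA against the two scaled face axes with q of
# degree n-2, and interior moments int_K (v . e_i) q dx with q of degree n-3.
# Inverting `mat` expresses the dual (nodal) basis in terms of the spanning set
# `funcs`, which is what is returned in `g`.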
@pytest.mark.parametrize("order", [1, 2, 3])
def test_tri(order):
celltype = libtab.CellType.triangle
g = sympy_nedelec(celltype, order)
x = sympy.Symbol("x")
y = sympy.Symbol("y")
nedelec = libtab.Nedelec("triangle", order)
pts = libtab.create_lattice(celltype, 6, libtab.LatticeType.equispaced, True)
nderiv = 3
wtab = nedelec.tabulate(nderiv, pts)
for kx in range(nderiv):
for ky in range(0, nderiv - kx):
wsym = numpy.zeros_like(wtab[0])
for i in range(len(g)):
wd = sympy.diff(g[i], x, kx, y, ky)
for j, p in enumerate(pts):
wsym[j, i] = wd.subs([(x, p[0]), (y, p[1])])
assert(numpy.isclose(wtab[libtab.index(kx, ky)], wsym).all())
@pytest.mark.parametrize("order", [1, 2, 3])
def test_tet(order):
celltype = libtab.CellType.tetrahedron
g = sympy_nedelec(celltype, order)
x = sympy.Symbol("x")
y = sympy.Symbol("y")
z = sympy.Symbol("z")
nedelec = libtab.Nedelec("tetrahedron", order)
pts = libtab.create_lattice(celltype, 6, libtab.LatticeType.equispaced, True)
nderiv = 1
wtab = nedelec.tabulate(nderiv, pts)
for k in range(nderiv + 1):
for q in range(k + 1):
for kx in range(q + 1):
ky = q - kx
kz = k - q
wsym = numpy.zeros_like(wtab[0])
for i in range(len(g)):
wd = sympy.diff(g[i], x, kx, y, ky, z, kz)
for j, p in enumerate(pts):
wsym[j, i] = wd.subs([(x, p[0]),
(y, p[1]),
(z, p[2])])
assert(numpy.isclose(wtab[libtab.index(kx, ky, kz)], wsym).all())
@pytest.mark.parametrize("order", [1, 2, 3, 4])
def test_dof_permutations_triangle(order):
nedelec = libtab.Nedelec("triangle", order)
permuted = {}
if order == 2:
# Reflect 2 DOFs on edges
permuted[0] = {0: 1, 1: 0}
permuted[1] = {2: 3, 3: 2}
permuted[2] = {4: 5, 5: 4}
elif order == 3:
# Reflect 3 DOFs on edges
permuted[0] = {0: 2, 2: 0}
permuted[1] = {3: 5, 5: 3}
permuted[2] = {6: 8, 8: 6}
elif order == 4:
# Reflect 4 DOFs on edges
permuted[0] = {0: 3, 1: 2, 2: 1, 3: 0}
permuted[1] = {4: 7, 5: 6, 6: 5, 7: 4}
permuted[2] = {8: 11, 9: 10, 10: 9, 11: 8}
base_perms = nedelec.base_permutations
assert len(base_perms) == 3
for i, perm in enumerate(base_perms):
actual = | numpy.zeros_like(perm) | numpy.zeros_like |
# -*- coding: utf-8 -*-
# tomolab
# <NAME>
# Harvard University, Martinos Center for Biomedical Imaging
# University of Pisa
import numpy as np
class ColorLookupTable():
def __init__(self):
self._by_index = {}
self._by_name = {}
def n_entries(self):
return len(self._by_index)
def load_from_file_freesurfer(self, filename):
with open(filename, 'r') as fid:
F = fid.read()
F = F.split('\r\n')
self._by_index = {}
self._by_name = {}
for line in F:
if not line == '':
index, name, r, g, b, a = line.split()
r = | np.uint8(r) | numpy.uint8 |
from __future__ import division, print_function, absolute_import
import os
import numpy as np
import logging
from dipy.io.image import load_nifti
from dipy.workflows.workflow import Workflow
class IoInfoFlow(Workflow):
@classmethod
def get_short_name(cls):
return 'io_info'
def run(self, input_files,
b0_threshold=50, bvecs_tol=0.01, bshell_thr=100):
""" Provides useful information about different files used in
medical imaging. Any number of input files can be provided. The
program identifies the type of file by its extension.
Parameters
----------
input_files : variable string
Any number of Nifti1, bvals or bvecs files.
b0_threshold : float, optional
(default 50)
bvecs_tol : float, optional
Threshold used to check that norm(bvec) = 1 +/- bvecs_tol
b-vectors are unit vectors (default 0.01)
bshell_thr : float, optional
Threshold for distinguishing b-values in different shells
(default 100)
"""
np.set_printoptions(3, suppress=True)
io_it = self.get_io_iterator()
for input_path in io_it:
logging.info('------------------------------------------')
logging.info('Looking at {0}'.format(input_path))
logging.info('------------------------------------------')
ipath_lower = input_path.lower()
if ipath_lower.endswith('.nii') or ipath_lower.endswith('.nii.gz'):
data, affine, img, vox_sz, affcodes = load_nifti(
input_path,
return_img=True,
return_voxsize=True,
return_coords=True)
logging.info('Data size {0}'.format(data.shape))
logging.info('Data type {0}'.format(data.dtype))
logging.info('Data min {0} max {1} avg {2}'
.format(data.min(), data.max(), data.mean()))
logging.info('2nd percentile {0} 98th percentile {1}'
.format(np.percentile(data, 2),
np.percentile(data, 98)))
logging.info('Native coordinate system {0}'
.format(''.join(affcodes)))
logging.info('Affine to RAS1mm \n{0}'.format(affine))
logging.info('Voxel size {0}'.format(np.array(vox_sz)))
if np.sum(np.abs(np.diff(vox_sz))) > 0.1:
msg = \
'Voxel size is not isotropic. Please reslice.\n'
logging.warning(msg)
if os.path.basename(input_path).lower().find('bval') > -1:
bvals = np.loadtxt(input_path)
logging.info('Bvalues \n{0}'.format(bvals))
logging.info('Total number of bvalues {}'.format(len(bvals)))
shells = np.sum(np.diff(np.sort(bvals)) > bshell_thr)
logging.info('Number of gradient shells {0}'.format(shells))
logging.info('Number of b0s {0} (b0_thr {1})\n'
.format( | np.sum(bvals <= b0_threshold) | numpy.sum |
####
#
# The MIT License (MIT)
#
# Copyright 2019, 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
import time
import unittest
import numpy as np
from ssvm.kernel_utils import minmax_kernel, tanimoto_kernel
from ssvm.feature_utils import CountingFpsBinarizer, RemoveCorrelatedFeatures
class TestCorrelatedFeatureRemoval(unittest.TestCase):
def test_corner_cases(self):
# No feature exceeds the correlation threshold
X = np.random.RandomState(13).rand(1010, 29)
mask = RemoveCorrelatedFeatures().fit(X).get_support()
self.assertTrue(np.all(mask))
# All features are correlated
X = np.array([
np.random.RandomState(101).rand(5),
np.random.RandomState(101).rand(5),
np.random.RandomState(101).rand(5),
]).T
mask = RemoveCorrelatedFeatures().fit(X).get_support()
self.assertEqual(1, np.sum(mask))
def test_correct_feature_removal(self):
X = np.random.RandomState(43).random((3, 4))
# array([[0.11505457, 0.60906654, 0.13339096, 0.24058962],
# [0.32713906, 0.85913749, 0.66609021, 0.54116221],
# [0.02901382, 0.7337483 , 0.39495002, 0.80204712]])
R = np.corrcoef(X.T)
# array([[1., 0.69228233, 0.69857039, -0.24099928],
# [0.69228233, 1., 0.99996171, 0.53351747],
# [0.69857039, 0.99996171, 1., 0.52609601],
# [-0.24099928, 0.53351747, 0.52609601, 1.]])
mask_ref = [True, True, False, True]
mask = RemoveCorrelatedFeatures().fit(X).get_support()
np.testing.assert_array_equal(mask_ref, mask)
class TestCountingFpsBinarizer(unittest.TestCase):
def setUp(self) -> None:
self.X1 = np.array(
[
[1, 0, 0, 3, 4, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[12, 12, 12, 12, 12, 12, 12],
[0, 1, 2, 4, 0, 12, 5]
]
)
self.X2 = np.array(
[
[1, 0, 0, 3, 4, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[1, 2, 2, 0, 4, 12, 12],
[0, 1, 2, 4, 0, 12, 5]
]
)
self.X3 = np.random.RandomState(111).randint(0, 12, size=(22, 45))
self.X4 = np.random.RandomState(111).randint(0, 16, size=(300000, 307))
self.X5 = np.array(
[
[1, 1, 0, 3, 4, 1, 0],
[0, 1, 0, 0, 0, 0, 0],
[1, 1, 2, 0, 4, 5, 0],
[0, 1, 2, 4, 0, 5, 0]
]
)
def test_length(self):
trans = CountingFpsBinarizer(bin_centers=np.array([1, 2, 3, 4, 8]), compress=True)
trans.fit(self.X2)
self.assertEqual(1 + 2 + 2 + 4 + 4 + 5 + 5, len(trans))
def test_conversion(self):
trans = CountingFpsBinarizer(bin_centers=np.array([1, 2, 3, 4, 8]))
Z = trans.fit_transform(self.X1)
self.assertEqual((len(self.X1), self.X1.shape[1] * 5), Z.shape)
np.testing.assert_array_equal(
np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]
]
),
Z
)
def test_edge_cases(self):
trans = CountingFpsBinarizer(bin_centers=np.array([1]))
Z = trans.fit_transform(self.X1)
self.assertEqual(self.X1.shape, Z.shape)
| np.testing.assert_array_equal(self.X1 > 0, Z) | numpy.testing.assert_array_equal |
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from numpy.linalg import inv
import matplotlib.colors as colors
import math
from matplotlib import cm
from matplotlib import rc
from matplotlib import rcParams
__author__ = 'ernesto'
# whether to use LaTeX or mathtext for text rendering
rc('text', usetex=True)
rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
####### Parameters #######
# number of samples
N = 400
# parameter a of the state equation
a = 1
# variance of the excitation (process) noise
var_u = 0.005
# variance of the observation noise
var_w = 0.05
# mean and variance of f0[-1]
mu_f0_i = 0.2
var_f0_i = 0.1
# Kalman filter parameters
# number of parameters (state dimension)
p = 3
# state transition matrix
B = np.array([[0], [0], [1]])
H = np.array([[1, 0, 0]])
# Kalman filter initial conditions
# s[-1|-1]
s_est_i = np.array([[0.5], [0.5], [mu_f0_i]])
# M[-1|-1]
C_s_i = 100 * np.eye(p)
q = 0.001
def fun_a(s):
a_1 = s[0] * math.cos(s[2]) - s[1] * math.sin(s[2])
a_2 = s[0] * math.sin(s[2]) + s[1] * math.cos(s[2])
a_3 = a * s[2]
return np.array([a_1, a_2, a_3])
def fun_A(s):
A_1 = [math.cos(s[2]), -math.sin(s[2]), -s[0] * math.sin(s[2]) - s[1] * math.cos(s[2])]
A_2 = [math.sin(s[2]), math.cos(s[2]), s[0] * math.cos(s[2]) - s[1] * math.sin(s[2])]
A_3 = [0, 0, a]
return np.array([A_1, A_2, A_3])
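# State model used below (a sketch of the intent, inferred from fun_a/fun_A):
# s = [x1, x2, w], where x1 + j*x2 is a phasor that is rotated by the angular
# increment w at every step, and w itself follows w[n] = a * w[n-1] driven by
# process noise through B. fun_A is the Jacobian of fun_a, as required by the
# extended Kalman filter update.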
### End of parameters ###
ns = np.arange(N)
# generation of the instantaneous frequency
f0d_1 = np.zeros((N,))
N1 = 100
N2 = 300
f01 = 0.1
f02 = 0.3
f0d_1[:N1] = f01
f0d_1[N1:N2] = (f02 - f01) / (N2 - N1) * np.arange(N2 - N1) + f01
f0d_1[N2:] = f02
f01 = 0.1
f02 = 0.3
N1 = 200
f0d_1[:N1] = f01
f0d_1[N1:] = f02
var_u = 0.01
q = 0.005
# generation of the observations
phi = 2 * np.pi * np.cumsum(f0d_1)
y = np.cos(phi)
x = y + np.random.normal(0, np.sqrt(var_w), N)
# variables to store the results
s_ests = | np.zeros((p, N)) | numpy.zeros |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %(username)s
"""
import numpy as np
from astropy import constants
from matplotlib import pyplot as plt
from scipy.interpolate import PchipInterpolator, interp1d
def sinebell(n=1000, index=0.5):
""" sine bell to left & right end of spectra """
return np.sin(np.linspace(0,np.pi,n))**index
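# e.g. sinebell(5, 1.0) is approximately [0., 0.71, 1., 0.71, 0.]: both ends of the
# spectrum are tapered towards zero, and a smaller `index` gives a flatter taper.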
def sinebell_like(x, index=0.5):
return sinebell(len(x), index=index)
def test_sinebell():
plt.figure();
plt.plot(sinebell(4000,1))
plt.plot(sinebell(4000,1/2))
plt.plot(sinebell(4000,1/3))
return
def test_sinebell2():
""" load data """
wave, flux, flux_err = np.loadtxt('/hydrogen/projects/song/delCep_order20.dat').T
# flux_sine = flux - flux.mean()
flux_sine = 1-flux
flux_sine = flux_sine * sinebell_like(flux, 1.0)
plt.figure()
plt.plot(wave, (flux-1))
plt.plot(wave, flux_sine)
# plot(wave, flux_err)
return wave, flux_sine
def xcorr_rvgrid(wave_obs, flux_obs, wave_mod, flux_mod, mask_obs=None, rv_grid= | np.arange(-500, 510, 10) | numpy.arange |
##############################################################################
# get_utils.py
#
# Description:
# Defines functions to get existing properties from
# trajectories in a trajectories object.
#
# Last Edited:
# 2020/12/24
#
# Created By:
# <NAME>
#
##############################################################################
# Importing relevant packages.
import numpy as np
##############################################################################
# Define get_start_time() function.
def get_start_time(self):
"""
Returns times when particles are released (start of trajectory).
The start time (ns) is the time elapsed since the begining of the
simulation and is returned for all trajectories as a new ndarray.
Parameters
----------
self : trajectories object
Trajectories object passed from trajectories class method.
Returns
-------
start_time : ndarray
release time (ns) of each particle, with dimension (traj).
"""
# --------------------------------------------
# Determining indices of starting time values.
# --------------------------------------------
# Defining Time variable with NaT values replaced with NaNs.
Time = self.data.time.values
Time = np.where(np.isnat(Time), np.nan, Time.astype(int))
# Find indices of minimum values in time variable, start_ind.
start_ind = np.nanargmin(Time, axis=1)
# ----------------------------------------------------------
# Using start_ind to determine start times for trajectories.
# ----------------------------------------------------------
# Using start_ind as the indices to determine start_time.
# start_time is in timedelta64 format, unlike Time.
start_time = np.take_along_axis(self.data.time.values, np.expand_dims(start_ind, axis=-1), axis=-1).squeeze(axis=-1)
# Returning release times as ndarray, start_time.
return start_time
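# Worked example of the argmin/gather pattern used above (illustrative values):
#
# >>> t = np.array([[np.nan, 2., 1.], [3., np.nan, 5.]])
# >>> ind = np.nanargmin(t, axis=1)    # array([2, 0])
# >>> np.take_along_axis(t, np.expand_dims(ind, axis=-1), axis=-1).squeeze(axis=-1)
# array([1., 3.])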
##############################################################################
# Define get_start_loc() function.
def get_start_loc(self):
"""
Returns locations where particles are released (start of trajectory).
The start location is divided into start_lon, start_lat and start_z
which are returned for all trajectories as new ndarrays.
Parameters
----------
self : trajectories object
Trajectories object passed from trajectories class method.
Returns
-------
start_lat: ndarray
latitude of each particle at the time of release, with
dimension (traj).
start_lon: ndarray
longitude of each particle at the time of release, with
dimension (traj).
start_z: ndarray
depth of each particle at the time of release, with
dimension (traj).
"""
# --------------------------------------------
# Determining indices of starting time values.
# --------------------------------------------
# Defining Time variable with NaT values replaced with NaNs.
Time = self.data.time.values
Time = np.where(np.isnat(Time), np.nan, Time.astype(int))
# Find indices of minimum values in time variable, start_ind.
start_ind = np.nanargmin(Time, axis=1)
# -----------------------------------------------------------------
# Using start_ind to determine starting locations for trajectories.
# -----------------------------------------------------------------
# Using start_ind as the indices to determine start_lat.
start_lat = np.take_along_axis(self.data.lat.values, np.expand_dims(start_ind, axis=-1), axis=-1).squeeze(axis=-1)
# Using start_ind as the indices to determine start_lon.
start_lon = np.take_along_axis(self.data.lon.values, | np.expand_dims(start_ind, axis=-1) | numpy.expand_dims |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_weak_dominance [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_weak_dominance&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=doc-s_weak_dominance).
# +
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from arpym.statistics import simulate_normal
from arpym.tools import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_weak_dominance-parameters)
mu_ = np.array([1, 0]) # mean vector of jointly normal variables
sigma2_ = np.array([[1, 0],
[0, 1]]) # covariance matrix
j_ = 5000 # number of simulations
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_weak_dominance-implementation-step01): Calculate marginal cdfs and pdfs
# get pdf and cdf of X_1 and X_2
llim = np.floor(
min(mu_[0]-5*np.sqrt(sigma2_[0, 0]), mu_[1]-5*np.sqrt(sigma2_[1, 1]))
)
ulim = np.ceil(
max(mu_[0]+5*np.sqrt(sigma2_[0, 0]), mu_[1]+5*np.sqrt(sigma2_[1, 1]))
)
x_grid = np.linspace(llim, ulim, 100)
pdf_1 = sp.stats.norm.pdf(x_grid, mu_[0], np.sqrt(sigma2_[0, 0]))
pdf_2 = sp.stats.norm.pdf(x_grid, mu_[1], np.sqrt(sigma2_[1, 1]))
cdf_1 = sp.stats.norm.cdf(x_grid, mu_[0], | np.sqrt(sigma2_[0, 0]) | numpy.sqrt |
"""Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
from scipy.misc import factorial
from numpy.polynomial.polynomial import polyval as npp_polyval
import math
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs( | real(ez + integ) | numpy.real |
#!/usr/bin/env python3
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
def runCmd(cmd, silent=0):
import os
if silent == 0:
print("{}".format(cmd))
status = os.system(cmd)
if status != 0:
raise Exception('error when running:\n{}\n'.format(cmd))
def find_vrt_keyword(xmlfile, keyword):
from xml.etree.ElementTree import ElementTree
value = None
xmlx = ElementTree(file=open(xmlfile,'r')).getroot()
#try 10 times
for i in range(10):
path=''
for j in range(i):
path += '*/'
value0 = xmlx.find(path+keyword)
if value0 != None:
value = value0.text
break
return value
def find_vrt_file(xmlfile, keyword, relative_path=True):
'''
find file in vrt in another directory
xmlfile: vrt file
relative_path: True: return relative (to current directory) path of the file
False: return absolute path of the file
'''
import os
#get absolute directory of xmlfile
xmlfile_dir = os.path.dirname(os.path.abspath(xmlfile))
#find source file path
file = find_vrt_keyword(xmlfile, keyword)
#get absolute path of source file
file = os.path.abspath(os.path.join(xmlfile_dir, file))
#get relative path of source file
if relative_path:
file = os.path.relpath(file, './')
return file
def create_xml(fileName, width, length, fileType):
import isceobj
if fileType == 'slc':
image = isceobj.createSlcImage()
elif fileType == 'int':
image = isceobj.createIntImage()
elif fileType == 'amp':
image = isceobj.createAmpImage()
elif fileType == 'cor':
image = isceobj.createOffsetImage()
elif fileType == 'rmg' or fileType == 'unw':
image = isceobj.Image.createUnwImage()
elif fileType == 'byte':
image = isceobj.createImage()
image.setDataType('BYTE')
elif fileType == 'float':
image = isceobj.createImage()
image.setDataType('FLOAT')
elif fileType == 'double':
image = isceobj.createImage()
image.setDataType('DOUBLE')
else:
raise Exception('format not supported yet!\n')
image.setFilename(fileName)
image.extraFilename = fileName + '.vrt'
image.setWidth(width)
image.setLength(length)
#image.setAccessMode('read')
#image.createImage()
image.renderHdr()
#image.finalizeImage()
def multilook_v1(data, nalks, nrlks, mean=True):
'''
doing multiple looking
ATTENSION: original array changed after running this function
'''
(length, width)=data.shape
width2 = int(width/nrlks)
length2 = int(length/nalks)
for i in range(1, nalks):
data[0:length2*nalks:nalks, :] += data[i:length2*nalks:nalks, :]
for i in range(1, nrlks):
data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks] += data[0:length2*nalks:nalks, i:width2*nrlks:nrlks]
if mean:
return data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks] / nrlks / nalks
else:
return data[0:length2*nalks:nalks, 0:width2*nrlks:nrlks]
def multilook(data, nalks, nrlks, mean=True):
'''
doing multiple looking
'''
import numpy as np
(length, width)=data.shape
width2 = int(width/nrlks)
length2 = int(length/nalks)
data2=np.zeros((length2, width), dtype=data.dtype)
for i in range(0, nalks):
data2 += data[i:length2*nalks:nalks, :]
for i in range(1, nrlks):
data2[:, 0:width2*nrlks:nrlks] += data2[:, i:width2*nrlks:nrlks]
if mean:
return data2[:, 0:width2*nrlks:nrlks] / nrlks / nalks
else:
return data2[:, 0:width2*nrlks:nrlks]
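# Worked example (illustrative): taking 2 azimuth looks and 2 range looks of a
# 4x4 array averages each non-overlapping 2x2 block.
#
# >>> d = np.arange(16, dtype=np.float32).reshape(4, 4)
# >>> multilook(d, 2, 2, mean=True)
# array([[ 2.5,  4.5],
#        [10.5, 12.5]], dtype=float32)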
def cal_coherence_1(inf, win=5):
'''
Compute coherence using scipy convolve 2D. Same as "def cal_coherence(inf, win=5):" in funcs.py in insarzd
#still use standard coherence estimation equation, but with magnitude removed.
#for example, equation (2) in
#<NAME> and <NAME>, Accurate Estimation of Correlation in InSAR Observations,
#IEEE GEOSCIENCE AND REMOTE SENSING LETTERS, VOL. 2, NO. 2, APRIL 2005.
'''
import numpy as np
import scipy.signal as ss
filt = np.ones((win,win))/ (1.0*win*win)
flag = ss.convolve2d((inf!=0), filt, mode='same')
angle = inf / (np.absolute(inf)+(inf==0))
cor = ss.convolve2d(angle, filt, mode='same')
cor = np.absolute(cor)
#remove incomplete convolution result
cor[np.nonzero(flag < 0.999)] = 0.0
#print(np.max(cor), np.min(cor))
#cor.astype(np.float32).tofile(f)
return cor
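# In other words, for each win x win window W the estimate is
#     gamma = | (1/win**2) * sum_{W} exp(j*phase) |
# so only the interferometric phase contributes, not the SLC magnitudes.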
def computeOffsetFromOrbit(referenceSwath, referenceTrack, secondarySwath, secondaryTrack, referenceSample, referenceLine):
'''
compute range and azimuth offsets using orbit. all range/azimuth indexes start with 0
referenceSample: reference sample where offset is computed, no need to be integer
referenceLine: reference line where offset is computed, no need to be integer
'''
import datetime
pointingDirection = {'right': -1, 'left' :1}
#compute a pair of range and azimuth offsets using geometry
#using Piyush's code for computing range and azimuth offsets
midRange = referenceSwath.startingRange + referenceSwath.rangePixelSize * referenceSample
midSensingStart = referenceSwath.sensingStart + datetime.timedelta(seconds = referenceLine / referenceSwath.prf)
llh = referenceTrack.orbit.rdr2geo(midSensingStart, midRange, side=pointingDirection[referenceTrack.pointingDirection])
slvaz, slvrng = secondaryTrack.orbit.geo2rdr(llh, side=pointingDirection[referenceTrack.pointingDirection])
###Translate to offsets
#at this point, secondary range pixel size and prf should be the same as those of reference
rgoff = ((slvrng - secondarySwath.startingRange) / referenceSwath.rangePixelSize) - referenceSample
azoff = ((slvaz - secondarySwath.sensingStart).total_seconds() * referenceSwath.prf) - referenceLine
return (rgoff, azoff)
def overlapFrequency(centerfreq1, bandwidth1, centerfreq2, bandwidth2):
startfreq1 = centerfreq1 - bandwidth1 / 2.0
endingfreq1 = centerfreq1 + bandwidth1 / 2.0
startfreq2 = centerfreq2 - bandwidth2 / 2.0
endingfreq2 = centerfreq2 + bandwidth2 / 2.0
overlapfreq = []
if startfreq2 <= startfreq1 <= endingfreq2:
overlapfreq.append(startfreq1)
if startfreq2 <= endingfreq1 <= endingfreq2:
overlapfreq.append(endingfreq1)
if startfreq1 < startfreq2 < endingfreq1:
overlapfreq.append(startfreq2)
if startfreq1 < endingfreq2 < endingfreq1:
overlapfreq.append(endingfreq2)
if len(overlapfreq) != 2:
#no overlap bandwidth
return None
else:
startfreq = min(overlapfreq)
endingfreq = max(overlapfreq)
return [startfreq, endingfreq]
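# Worked example (illustrative numbers): two 28 MHz bands centred at 0 Hz and 7 MHz
# overlap over [-7 MHz, 14 MHz]:
# overlapFrequency(0.0, 28.0e6, 7.0e6, 28.0e6) -> [-7000000.0, 14000000.0]
# None is returned when the two bands do not overlap at all.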
def readOffset(filename):
from isceobj.Location.Offset import OffsetField,Offset
with open(filename, 'r') as f:
lines = f.readlines()
# 0 1 2 3 4 5 6 7
#retstr = "%s %s %s %s %s %s %s %s" % (self.x,self.dx,self.y,self.dy,self.snr, self.sigmax, self.sigmay, self.sigmaxy)
offsets = OffsetField()
for linex in lines:
#linexl = re.split('\s+', linex)
#detect blank lines with only spaces and tabs, lines with invalid numbers
if (linex.strip() == '') or ('*' in linex):
continue
linexl = linex.split()
offset = Offset()
#offset.setCoordinate(int(linexl[0]),int(linexl[2]))
offset.setCoordinate(float(linexl[0]),float(linexl[2]))
offset.setOffset(float(linexl[1]),float(linexl[3]))
offset.setSignalToNoise(float(linexl[4]))
offset.setCovariance(float(linexl[5]),float(linexl[6]),float(linexl[7]))
offsets.addOffset(offset)
return offsets
def writeOffset(offset, fileName):
offsetsPlain = ''
for offsetx in offset:
offsetsPlainx = "{}".format(offsetx)
offsetsPlainx = offsetsPlainx.split()
offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
int(float(offsetsPlainx[0])),
float(offsetsPlainx[1]),
int(float(offsetsPlainx[2])),
float(offsetsPlainx[3]),
float(offsetsPlainx[4]),
float(offsetsPlainx[5]),
float(offsetsPlainx[6]),
float(offsetsPlainx[7])
)
offsetFile = fileName
with open(offsetFile, 'w') as f:
f.write(offsetsPlain)
def reformatGeometricalOffset(rangeOffsetFile, azimuthOffsetFile, reformatedOffsetFile, rangeStep=1, azimuthStep=1, maximumNumberOfOffsets=10000):
'''
reformat geometrical offset as ampcor output format
'''
import numpy as np
import isceobj
img = isceobj.createImage()
img.load(rangeOffsetFile+'.xml')
width = img.width
length = img.length
step = int(np.sqrt(width*length/maximumNumberOfOffsets) + 0.5)
if step == 0:
step = 1
rgoff = np.fromfile(rangeOffsetFile, dtype=np.float32).reshape(length, width)
azoff = np.fromfile(azimuthOffsetFile, dtype=np.float32).reshape(length, width)
offsetsPlain = ''
for i in range(0, length, step):
for j in range(0, width, step):
if (rgoff[i][j] == -999999.0) or (azoff[i][j] == -999999.0):
continue
offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
int(j*rangeStep+1),
float(rgoff[i][j])*rangeStep,
int(i*azimuthStep+1),
float(azoff[i][j])*azimuthStep,
float(22.00015),
float(0.000273),
float(0.002126),
float(0.000013)
)
with open(reformatedOffsetFile, 'w') as f:
f.write(offsetsPlain)
return
def cullOffsets(offsets):
import isceobj
from iscesys.StdOEL.StdOELPy import create_writer
distances = (10,5,3,3,3,3,3,3)
#numCullOffsetsLimits = (100, 75, 50, 50, 50, 50, 50, 50)
numCullOffsetsLimits = (50, 40, 30, 30, 30, 30, 30, 30)
refinedOffsets = offsets
for i, (distance, numCullOffsetsLimit) in enumerate(zip(distances, numCullOffsetsLimits)):
cullOff = isceobj.createOffoutliers()
cullOff.wireInputPort(name='offsets', object=refinedOffsets)
cullOff.setSNRThreshold(2.0)
cullOff.setDistance(distance)
#set the tag used in the outfile. each message is precided by this tag
#is the writer is not of "file" type the call has no effect
stdWriter = create_writer("log", "", True, filename="offoutliers.log")
stdWriter.setFileTag("offoutliers", "log")
stdWriter.setFileTag("offoutliers", "err")
stdWriter.setFileTag("offoutliers", "out")
cullOff.setStdWriter(stdWriter)
#run it
cullOff.offoutliers()
refinedOffsets = cullOff.getRefinedOffsetField()
numLeft = len(refinedOffsets._offsets)
print('Number of offsets left after %2dth culling: %5d'%(i, numLeft))
if numLeft < numCullOffsetsLimit:
refinedOffsets = None
stdWriter.finalize()
return refinedOffsets
def cullOffsetsRoipac(offsets, numThreshold=50):
'''
cull offsets using fortran program from ROI_PAC
numThreshold: minmum number of offsets left
'''
import os
from contrib.alos2proc_f.alos2proc_f import fitoff
from isceobj.Alos2Proc.Alos2ProcPublic import readOffset
from isceobj.Alos2Proc.Alos2ProcPublic import writeOffset
offsetFile = 'offset.off'
cullOffsetFile = 'cull.off'
writeOffset(offsets, offsetFile)
#try different parameters to cull offsets
breakFlag = 0
for maxrms in [0.08, 0.16, 0.24]:
for nsig in [1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9]:
fitoff(offsetFile, cullOffsetFile, nsig, maxrms, numThreshold)
#check number of matching points left
with open(cullOffsetFile, 'r') as ff:
numCullOffsets = sum(1 for linex in ff)
if numCullOffsets < numThreshold:
print('offsets culling with nsig {} maxrms {}: {} left after culling, too few points'.format(nsig, maxrms, numCullOffsets))
else:
print('offsets culling with nsig {} maxrms {}: {} left after culling, success'.format(nsig, maxrms, numCullOffsets))
breakFlag = 1
break
if breakFlag == 1:
break
if numCullOffsets < numThreshold:
refinedOffsets = None
else:
refinedOffsets = readOffset(cullOffsetFile)
os.remove(offsetFile)
os.remove(cullOffsetFile)
return refinedOffsets
def meanOffset(offsets):
rangeOffset = 0.0
azimuthOffset = 0.0
i = 0
for offsetx in offsets:
i += 1
rangeOffset += offsetx.dx
azimuthOffset += offsetx.dy
rangeOffset /= i
azimuthOffset /= i
return (rangeOffset, azimuthOffset)
def fitOffset(inputOffset, order=1, axis='range'):
'''fit a polynomial to the offset
order=0 also works, output is mean offset
'''
import numpy as np
index = []
offset = []
for a in inputOffset:
if axis=='range':
index.append(a.x)
offset.append(a.dx)
else:
index.append(a.y)
offset.append(a.dy)
p = np.polyfit(index, offset, order)
return list(p[::-1])
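# Note: np.polyfit returns coefficients from the highest degree down, so the
# reversal above yields [constant, linear, ...]; with order=0 the single returned
# value is simply the mean offset along the chosen axis.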
def topo(swath, track, demFile, latFile, lonFile, hgtFile, losFile=None, incFile=None, mskFile=None, numberRangeLooks=1, numberAzimuthLooks=1, multilookTimeOffset=True):
import datetime
import isceobj
from zerodop.topozero import createTopozero
from isceobj.Planet.Planet import Planet
pointingDirection = {'right': -1, 'left' :1}
demImage = isceobj.createDemImage()
demImage.load(demFile + '.xml')
demImage.setAccessMode('read')
#####Run Topo
planet = Planet(pname='Earth')
topo = createTopozero()
topo.slantRangePixelSpacing = numberRangeLooks * swath.rangePixelSize
topo.prf = 1.0 / (numberAzimuthLooks * swath.azimuthLineInterval)
topo.radarWavelength = track.radarWavelength
topo.orbit = track.orbit
topo.width = int(swath.numberOfSamples/numberRangeLooks)
topo.length = int(swath.numberOfLines/numberAzimuthLooks)
topo.wireInputPort(name='dem', object=demImage)
topo.wireInputPort(name='planet', object=planet)
topo.numberRangeLooks = 1 #must be set as 1
topo.numberAzimuthLooks = 1 #must be set as 1 Cunren
topo.lookSide = pointingDirection[track.pointingDirection]
if multilookTimeOffset == True:
topo.sensingStart = swath.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0/swath.prf)
topo.rangeFirstSample = swath.startingRange + (numberRangeLooks-1.0)/2.0 * swath.rangePixelSize
else:
topo.sensingStart = swath.sensingStart
topo.rangeFirstSample = swath.startingRange
topo.demInterpolationMethod='BIQUINTIC'
topo.latFilename = latFile
topo.lonFilename = lonFile
topo.heightFilename = hgtFile
if losFile != None:
topo.losFilename = losFile
if incFile != None:
topo.incFilename = incFile
if mskFile != None:
topo.maskFilename = mskFile
topo.topo()
return list(topo.snwe)
def geo2rdr(swath, track, latFile, lonFile, hgtFile, rangeOffsetFile, azimuthOffsetFile, numberRangeLooks=1, numberAzimuthLooks=1, multilookTimeOffset=True):
import datetime
import isceobj
from zerodop.geo2rdr import createGeo2rdr
from isceobj.Planet.Planet import Planet
pointingDirection = {'right': -1, 'left' :1}
latImage = isceobj.createImage()
latImage.load(latFile + '.xml')
latImage.setAccessMode('read')
lonImage = isceobj.createImage()
lonImage.load(lonFile + '.xml')
lonImage.setAccessMode('read')
hgtImage = isceobj.createDemImage()
hgtImage.load(hgtFile + '.xml')
hgtImage.setAccessMode('read')
planet = Planet(pname='Earth')
topo = createGeo2rdr()
topo.configure()
topo.slantRangePixelSpacing = numberRangeLooks * swath.rangePixelSize
topo.prf = 1.0 / (numberAzimuthLooks * swath.azimuthLineInterval)
topo.radarWavelength = track.radarWavelength
topo.orbit = track.orbit
topo.width = int(swath.numberOfSamples/numberRangeLooks)
topo.length = int(swath.numberOfLines/numberAzimuthLooks)
topo.demLength = hgtImage.length
topo.demWidth = hgtImage.width
topo.wireInputPort(name='planet', object=planet)
topo.numberRangeLooks = 1
topo.numberAzimuthLooks = 1 #must be set to be 1
topo.lookSide = pointingDirection[track.pointingDirection]
if multilookTimeOffset == True:
topo.sensingStart = swath.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0*swath.azimuthLineInterval)
topo.rangeFirstSample = swath.startingRange + (numberRangeLooks-1.0)/2.0*swath.rangePixelSize
else:
topo.setSensingStart(swath.sensingStart)
topo.rangeFirstSample = swath.startingRange
topo.dopplerCentroidCoeffs = [0.] #we are using zero doppler geometry
topo.demImage = hgtImage
topo.latImage = latImage
topo.lonImage = lonImage
topo.rangeOffsetImageName = rangeOffsetFile
topo.azimuthOffsetImageName = azimuthOffsetFile
topo.geo2rdr()
return
def waterBodyRadar(latFile, lonFile, wbdFile, wbdOutFile):
'''
create water body in radar coordinates
'''
import numpy as np
import isceobj
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
demImage = isceobj.createDemImage()
demImage.load(wbdFile + '.xml')
#demImage.setAccessMode('read')
wbd=np.memmap(wbdFile, dtype='byte', mode='r', shape=(demImage.length, demImage.width))
image = isceobj.createImage()
image.load(latFile+'.xml')
width = image.width
length = image.length
latFp = open(latFile, 'rb')
lonFp = open(lonFile, 'rb')
wbdOutFp = open(wbdOutFile, 'wb')
wbdOutIndex = np.arange(width, dtype=np.int32)
print("create water body in radar coordinates...")
for i in range(length):
if (((i+1)%200) == 0):
print("processing line %6d of %6d" % (i+1, length), end='\r', flush=True)
wbdOut = np.zeros(width, dtype='byte')-2
lat = np.fromfile(latFp, dtype=np.float64, count=width)
lon = np.fromfile(lonFp, dtype=np.float64, count=width)
#indexes start with zero
lineIndex = np.int32((lat - demImage.firstLatitude) / demImage.deltaLatitude + 0.5)
sampleIndex = np.int32((lon - demImage.firstLongitude) / demImage.deltaLongitude + 0.5)
inboundIndex = np.logical_and(
np.logical_and(lineIndex>=0, lineIndex<=demImage.length-1),
np.logical_and(sampleIndex>=0, sampleIndex<=demImage.width-1)
)
#keep SRTM convention. water body. (0) --- land; (-1) --- water; (-2 or other value) --- no data.
wbdOut[(wbdOutIndex[inboundIndex],)] = wbd[(lineIndex[inboundIndex], sampleIndex[inboundIndex])]
wbdOut.astype(np.int8).tofile(wbdOutFp)
print("processing line %6d of %6d" % (length, length))
#create_xml(wbdOutFile, width, length, 'byte')
image = isceobj.createImage()
image.setDataType('BYTE')
image.addDescription('water body. (0) --- land; (-1) --- water; (-2) --- no data.')
image.setFilename(wbdOutFile)
image.extraFilename = wbdOutFile + '.vrt'
image.setWidth(width)
image.setLength(length)
image.renderHdr()
del wbd, demImage, image
latFp.close()
lonFp.close()
wbdOutFp.close()
def renameFile(oldname, newname):
import os
import isceobj
img = isceobj.createImage()
img.load(oldname + '.xml')
img.setFilename(newname)
img.extraFilename = newname+'.vrt'
img.renderHdr()
os.rename(oldname, newname)
os.remove(oldname + '.xml')
os.remove(oldname + '.vrt')
def cal_coherence(inf, win=5, edge=0):
'''
compute coherence using only the interferogram (phase).
This routine still follows the regular equation for computing coherence,
but assumes the amplitudes of reference and secondary are one, so that coherence
can be computed using phase only.
inf: interferogram
win: window size
edge: 0: remove all non-full convolution samples
1: remove samples computed from less than half convolution
(win=5 used to illustration below)
* * *
* * *
* * *
* * *
* * *
2: remove samples computed from less than quater convolution
(win=5 used to illustration below)
* * *
* * *
* * *
3: remove non-full convolution samples on image edges
4: keep all samples
'''
import numpy as np
import scipy.signal as ss
if win % 2 != 1:
raise Exception('window size must be odd!')
hwin = np.int(np.around((win - 1) / 2))
filt = np.ones((win, win))
amp = np.absolute(inf)
cnt = ss.convolve2d((amp!=0), filt, mode='same')
cor = ss.convolve2d(inf/(amp + (amp==0)), filt, mode='same')
cor = (amp!=0) * np.absolute(cor) / (cnt + (cnt==0))
#trim edges
if edge == 0:
num = win * win
cor[np.nonzero(cnt < num)] = 0.0
elif edge == 1:
num = win * (hwin+1)
cor[np.nonzero(cnt < num)] = 0.0
elif edge == 2:
num = (hwin+1) * (hwin+1)
cor[np.nonzero(cnt < num)] = 0.0
elif edge == 3:
cor[0:hwin, :] = 0.0
cor[-hwin:, :] = 0.0
cor[:, 0:hwin] = 0.0
cor[:, -hwin:] = 0.0
else:
pass
#print("coherence, max: {} min: {}".format(np.max(cor[np.nonzero(cor!=0)]), np.min(cor[np.nonzero(cor!=0)])))
return cor
def snaphuUnwrap(track, t, wrapName, corName, unwrapName, nrlks, nalks, costMode = 'DEFO',initMethod = 'MST', defomax = 4.0, initOnly = False):
#runUnwrap(self, costMode = 'SMOOTH',initMethod = 'MCF', defomax = 2, initOnly = True)
'''
track: track object
t: time for computing earth radius and altitude, normally mid azimuth time
wrapName: input interferogram
corName: input coherence file
unwrapName: output unwrapped interferogram
nrlks: number of range looks of the interferogram
nalks: number of azimuth looks of the interferogram
'''
import datetime
import numpy as np
import isceobj
from contrib.Snaphu.Snaphu import Snaphu
from isceobj.Planet.Planet import Planet
corImg = isceobj.createImage()
corImg.load(corName + '.xml')
width = corImg.width
length = corImg.length
#get altitude
orbit = track.orbit
peg = orbit.interpolateOrbit(t, method='hermite')
refElp = Planet(pname='Earth').ellipsoid
llh = refElp.xyz_to_llh(peg.getPosition())
hdg = orbit.getENUHeading(t)
refElp.setSCH(llh[0], llh[1], hdg)
earthRadius = refElp.pegRadCur
altitude = llh[2]
rangeLooks = nrlks
azimuthLooks = nalks
azfact = 0.8
rngfact = 0.8
corrLooks = rangeLooks * azimuthLooks/(azfact*rngfact)
maxComponents = 20
snp = Snaphu()
snp.setInitOnly(initOnly)
snp.setInput(wrapName)
snp.setOutput(unwrapName)
snp.setWidth(width)
snp.setCostMode(costMode)
snp.setEarthRadius(earthRadius)
snp.setWavelength(track.radarWavelength)
snp.setAltitude(altitude)
snp.setCorrfile(corName)
snp.setInitMethod(initMethod)
snp.setCorrLooks(corrLooks)
snp.setMaxComponents(maxComponents)
snp.setDefoMaxCycles(defomax)
snp.setRangeLooks(rangeLooks)
snp.setAzimuthLooks(azimuthLooks)
if corImg.bands == 1:
snp.setCorFileFormat('FLOAT_DATA')
snp.prepare()
snp.unwrap()
######Render XML
outImage = isceobj.Image.createUnwImage()
outImage.setFilename(unwrapName)
outImage.setWidth(width)
outImage.setAccessMode('read')
outImage.renderVRT()
outImage.createImage()
outImage.finalizeImage()
outImage.renderHdr()
#####Check if connected components was created
if snp.dumpConnectedComponents:
connImage = isceobj.Image.createImage()
connImage.setFilename(unwrapName+'.conncomp')
connImage.setWidth(width)
connImage.setAccessMode('read')
connImage.setDataType('BYTE')
connImage.renderVRT()
connImage.createImage()
connImage.finalizeImage()
connImage.renderHdr()
del connImage
del corImg
del snp
del outImage
#remove wired things in no-data area
amp=np.memmap(unwrapName, dtype='float32', mode='r+', shape=(length*2, width))
wrap = np.fromfile(wrapName, dtype=np.complex64).reshape(length, width)
(amp[0:length*2:2, :])[np.nonzero(wrap==0)]=0
(amp[1:length*2:2, :])[np.nonzero(wrap==0)]=0
del amp
del wrap
return
def snaphuUnwrapOriginal(wrapName, corName, ampName, unwrapName, costMode = 's', initMethod = 'mcf', snaphuConfFile = 'snaphu.conf'):
'''
unwrap interferogram using original snaphu program
'''
import numpy as np
import isceobj
corImg = isceobj.createImage()
corImg.load(corName + '.xml')
width = corImg.width
length = corImg.length
#specify coherence file format in configure file
#snaphuConfFile = 'snaphu.conf'
if corImg.bands == 1:
snaphuConf = '''CORRFILEFORMAT FLOAT_DATA
CONNCOMPFILE {}
MAXNCOMPS 20'''.format(unwrapName+'.conncomp')
else:
snaphuConf = '''CORRFILEFORMAT ALT_LINE_DATA
CONNCOMPFILE {}
MAXNCOMPS 20'''.format(unwrapName+'.conncomp')
with open(snaphuConfFile, 'w') as f:
f.write(snaphuConf)
cmd = 'snaphu {} {} -f {} -{} -o {} -a {} -c {} -v --{}'.format(
wrapName,
width,
snaphuConfFile,
costMode,
unwrapName,
ampName,
corName,
initMethod
)
runCmd(cmd)
create_xml(unwrapName, width, length, 'unw')
connImage = isceobj.Image.createImage()
connImage.setFilename(unwrapName+'.conncomp')
connImage.setWidth(width)
connImage.setAccessMode('read')
connImage.setDataType('BYTE')
connImage.renderVRT()
connImage.createImage()
connImage.finalizeImage()
connImage.renderHdr()
del connImage
#remove wired things in no-data area
amp=np.memmap(unwrapName, dtype='float32', mode='r+', shape=(length*2, width))
wrap = np.fromfile(wrapName, dtype=np.complex64).reshape(length, width)
(amp[0:length*2:2, :])[np.nonzero(wrap==0)]=0
(amp[1:length*2:2, :])[np.nonzero(wrap==0)]=0
del amp
del wrap
return
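# For reference, with the default costMode='s' and initMethod='mcf' the command
# assembled above expands to something like (placeholder file names):
# snaphu diff.int 2000 -f snaphu.conf -s -o diff.unw -a diff.amp -c diff.cor -v --mcf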
def getBboxGeo(track, useTrackOnly=False, numberOfSamples=1, numberOfLines=1, numberRangeLooks=1, numberAzimuthLooks=1):
'''
get bounding box in geo-coordinate
'''
import numpy as np
pointingDirection = {'right': -1, 'left' :1}
if useTrackOnly:
import datetime
rangeMin = track.startingRange + (numberRangeLooks-1.0)/2.0*track.rangePixelSize
rangeMax = rangeMin + (numberOfSamples-1) * numberRangeLooks * track.rangePixelSize
azimuthTimeMin = track.sensingStart + datetime.timedelta(seconds=(numberAzimuthLooks-1.0)/2.0*track.azimuthLineInterval)
azimuthTimeMax = azimuthTimeMin + datetime.timedelta(seconds=(numberOfLines-1) * numberAzimuthLooks * track.azimuthLineInterval)
bboxRdr = [rangeMin, rangeMax, azimuthTimeMin, azimuthTimeMax]
else:
bboxRdr = getBboxRdr(track)
rangeMin = bboxRdr[0]
rangeMax = bboxRdr[1]
azimuthTimeMin = bboxRdr[2]
azimuthTimeMax = bboxRdr[3]
#get bounding box using Piyush's code
hgtrange=[-500,9000]
ts = [azimuthTimeMin, azimuthTimeMax]
rngs = [rangeMin, rangeMax]
pos = []
for ht in hgtrange:
for tim in ts:
for rng in rngs:
llh = track.orbit.rdr2geo(tim, rng, height=ht, side=pointingDirection[track.pointingDirection])
pos.append(llh)
pos = np.array(pos)
# S N W E
bbox = [np.min(pos[:,0]), np.max(pos[:,0]), np.min(pos[:,1]), np.max(pos[:,1])]
return bbox
def getBboxRdr(track):
'''
get bounding box in radar-coordinate
'''
import datetime
numberOfFrames = len(track.frames)
numberOfSwaths = len(track.frames[0].swaths)
sensingStartList = []
sensingEndList = []
startingRangeList = []
endingRangeList = []
for i in range(numberOfFrames):
for j in range(numberOfSwaths):
swath = track.frames[i].swaths[j]
sensingStartList.append(swath.sensingStart)
sensingEndList.append(swath.sensingStart + datetime.timedelta(seconds=(swath.numberOfLines-1) * swath.azimuthLineInterval))
startingRangeList.append(swath.startingRange)
endingRangeList.append(swath.startingRange + (swath.numberOfSamples - 1) * swath.rangePixelSize)
azimuthTimeMin = min(sensingStartList)
azimuthTimeMax = max(sensingEndList)
azimuthTimeMid = azimuthTimeMin+datetime.timedelta(seconds=(azimuthTimeMax-azimuthTimeMin).total_seconds()/2.0)
rangeMin = min(startingRangeList)
rangeMax = max(endingRangeList)
rangeMid = (rangeMin + rangeMax) / 2.0
bbox = [rangeMin, rangeMax, azimuthTimeMin, azimuthTimeMax]
return bbox
def filterInterferogram(data, alpha, windowSize, stepSize):
'''
a filter wrapper
'''
import os
import numpy as np
from contrib.alos2filter.alos2filter import psfilt1
(length, width)=data.shape
data.astype(np.complex64).tofile('tmp1234.int')
psfilt1('tmp1234.int', 'filt_tmp1234.int', width, alpha, windowSize, stepSize)
data2 = np.fromfile('filt_tmp1234.int', dtype=np.complex64).reshape(length, width)
os.remove('tmp1234.int')
os.remove('filt_tmp1234.int')
return data2
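# Usage sketch (parameter values are only illustrative):
# filt_int = filterInterferogram(int_data, alpha=0.3, windowSize=32, stepSize=4)
# where int_data is a 2-D complex64 interferogram; psfilt1 applies a Goldstein-style
# power-spectrum filter whose strength is controlled by alpha.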
###################################################################
# these are routines for burst-by-burst ScanSAR interferometry
###################################################################
def mosaicBurstInterferogram(swath, burstPrefix, outputFile, numberOfLooksThreshold=1):
'''
take a burst sequence and output mosaicked file
'''
import numpy as np
interferogram = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.complex64)
cnt = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.int8)
for i in range(swath.numberOfBursts):
burstFile = burstPrefix + '_%02d.int'%(i+1)
burstInterferogram = np.fromfile(burstFile, dtype=np.complex64).reshape(swath.burstSlcNumberOfLines, swath.burstSlcNumberOfSamples)
interferogram[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += burstInterferogram
cnt[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += (burstInterferogram!=0)
#trim upper and lower edges that have fewer looks
#############################################################################
firstLine = 0
for i in range(swath.numberOfLines):
if np.sum(cnt[i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
firstLine = i
break
lastLine = swath.numberOfLines - 1
for i in range(swath.numberOfLines):
if np.sum(cnt[swath.numberOfLines-1-i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
lastLine = swath.numberOfLines-1-i
break
interferogram[:firstLine,:]=0
interferogram[lastLine+1:,:]=0
# if numberOfLooksThreshold!= None:
# interferogram[np.nonzero(cnt<numberOfLooksThreshold)] = 0
#############################################################################
interferogram.astype(np.complex64).tofile(outputFile)
create_xml(outputFile, swath.numberOfSamples, swath.numberOfLines, 'int')
def mosaicBurstAmplitude(swath, burstPrefix, outputFile, numberOfLooksThreshold=1):
'''
take a burst sequence and output the magnitude
'''
import numpy as np
amp = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.float32)
cnt = np.zeros((swath.numberOfLines, swath.numberOfSamples), dtype=np.int8)
for i in range(swath.numberOfBursts):
burstFile = burstPrefix + '_%02d.slc'%(i+1)
#azLineOffset = round((swath.burstSlcStartTimes[i] - swath.burstSlcStartTimes[0]).total_seconds() / swath.azimuthLineInterval)
burstMag = np.absolute(np.fromfile(burstFile, dtype=np.complex64).reshape(swath.burstSlcNumberOfLines, swath.burstSlcNumberOfSamples))
burstPwr = burstMag * burstMag
amp[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += burstPwr
cnt[0+swath.burstSlcFirstLineOffsets[i]:swath.burstSlcNumberOfLines+swath.burstSlcFirstLineOffsets[i], :] += (burstPwr!=0)
#trim upper and lower edges with less number of looks
#############################################################################
firstLine = 0
for i in range(swath.numberOfLines):
if np.sum(cnt[i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
firstLine = i
break
lastLine = swath.numberOfLines - 1
for i in range(swath.numberOfLines):
if np.sum(cnt[swath.numberOfLines-1-i,:]>=numberOfLooksThreshold) > swath.numberOfSamples/2:
lastLine = swath.numberOfLines-1-i
break
amp[:firstLine,:]=0
amp[lastLine+1:,:]=0
# if numberOfLooksThreshold!= None:
# amp[np.nonzero(cnt<numberOfLooksThreshold)] = 0
#############################################################################
np.sqrt(amp).astype(np.float32).tofile(outputFile)
create_xml(outputFile, swath.numberOfSamples, swath.numberOfLines, 'float')
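# Illustrative sketch (not part of the original module): the accumulate-and-count
# pattern used by mosaicBurstInterferogram/mosaicBurstAmplitude above, reduced to plain
# numpy arrays. The burst size and first-line offsets are made-up assumptions.
def _example_burst_mosaic():
    import numpy as np
    numberOfLines, numberOfSamples, burstLines = 100, 50, 30
    firstLineOffsets = [0, 20, 40, 60, 70]  # assumed offsets of each burst in the swath
    mosaic = np.zeros((numberOfLines, numberOfSamples), dtype=np.complex64)
    cnt = np.zeros((numberOfLines, numberOfSamples), dtype=np.int8)
    for off in firstLineOffsets:
        burst = np.ones((burstLines, numberOfSamples), dtype=np.complex64)
        mosaic[off:off+burstLines, :] += burst
        cnt[off:off+burstLines, :] += (burst != 0)
    # lines covered by fewer looks than a threshold can then be trimmed, as above
    return mosaic, cnt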
def resampleBursts(referenceSwath, secondarySwath,
referenceBurstDir, secondaryBurstDir, secondaryBurstResampledDir, interferogramDir,
referenceBurstPrefix, secondaryBurstPrefix, secondaryBurstResampledPrefix, interferogramPrefix,
rangeOffset, azimuthOffset, rangeOffsetResidual=0, azimuthOffsetResidual=0):
import os
import datetime
import numpy as np
import numpy.matlib
from contrib.alos2proc.alos2proc import resamp
os.makedirs(secondaryBurstResampledDir, exist_ok=True)
os.makedirs(interferogramDir, exist_ok=True)
#get burst file names
referenceBurstSlc = [referenceBurstPrefix+'_%02d.slc'%(i+1) for i in range(referenceSwath.numberOfBursts)]
secondaryBurstSlc = [secondaryBurstPrefix+'_%02d.slc'%(i+1) for i in range(secondarySwath.numberOfBursts)]
    secondaryBurstSlcResampled = [secondaryBurstResampledPrefix+'_%02d.slc'%(i+1) for i in range(referenceSwath.numberOfBursts)]
interferogram = [interferogramPrefix+'_%02d.int'%(i+1) for i in range(referenceSwath.numberOfBursts)]
length = referenceSwath.burstSlcNumberOfLines
width = referenceSwath.burstSlcNumberOfSamples
lengthSecondary = secondarySwath.burstSlcNumberOfLines
widthSecondary = secondarySwath.burstSlcNumberOfSamples
#secondary burst slc start times
secondaryBurstStartTimesSlc = [secondarySwath.firstBurstSlcStartTime + \
datetime.timedelta(seconds=secondarySwath.burstSlcFirstLineOffsets[i]*secondarySwath.azimuthLineInterval) \
for i in range(secondarySwath.numberOfBursts)]
#secondary burst raw start times
secondaryBurstStartTimesRaw = [secondarySwath.firstBurstRawStartTime + \
datetime.timedelta(seconds=i*secondarySwath.burstCycleLength/secondarySwath.prf) \
for i in range(secondarySwath.numberOfBursts)]
for i in range(referenceSwath.numberOfBursts):
##########################################################################
# 1. get offsets and corresponding secondary burst
##########################################################################
#range offset
with open(rangeOffset, 'rb') as f:
f.seek(referenceSwath.burstSlcFirstLineOffsets[i] * width * np.dtype(np.float32).itemsize, 0)
rgoffBurst = np.fromfile(f, dtype=np.float32, count=length*width).reshape(length,width)
if type(rangeOffsetResidual) == np.ndarray:
residual = rangeOffsetResidual[0+referenceSwath.burstSlcFirstLineOffsets[i]:length+referenceSwath.burstSlcFirstLineOffsets[i],:]
rgoffBurst[ | np.nonzero(rgoffBurst!=-999999.0) | numpy.nonzero |
# coding: utf8
"""
Sample class
============
Wrapper around a :class:`pandas.DataFrame` for storing point samples.
A sample is given by the data associated to a point,
and the point coordinates in the space of parameters.
The main benefit of this class is to carry feature labels
and to handle I/Os.
The internal dataframe is publicly available.
Class attributes are configured to return array-like objects
(:class:`numpy.ndarray` or :py:class:`list`)
"""
from copy import copy
from numbers import Number
import os
import logging
import numpy as np
import pandas as pd
from ..input_output import formater
class Sample(object):
"""Container class for samples."""
logger = logging.getLogger(__name__)
def __init__(self, space=None, data=None, plabels=None, flabels=None,
psizes=None, fsizes=None, pformat='json', fformat='json'):
"""Initialize the container and build the column index.
This index carries feature names. Features can be scalars or vectors.
Vector features do not need to be of the same size.
Samples are stored as a 2D row-major array: 1 sample per row.
:param array-like space: parameter space (1 point per sample)
:param array-like data: data associated to points
:param list(str) plabels: parameter names (for space)
:param list(str) flabels: feature names (for data)
:param list(int) psizes: lengths of parameters (for space)
:param list(int) fsizes: lengths of features (for data)
:param str pformat: file format name for space
:param str fformat: file format name for data
"""
# space dataframe
df_space = None
if space is not None:
df_space = create_dataframe(space, clabel='space', flabels=plabels,
fsizes=psizes)
elif ((plabels is not None and list(plabels))
or (psizes is not None and list(psizes))):
index = create_index(clabel='space', flabels=plabels, fsizes=psizes)
df_space = pd.DataFrame(columns=index)
# data dataframe
df_data = None
if data is not None:
df_data = create_dataframe(data, clabel='data', flabels=flabels,
fsizes=fsizes)
elif ((flabels is not None and list(flabels))
or (fsizes is not None and list(fsizes))):
index = create_index(clabel='data', flabels=flabels, fsizes=fsizes)
df_data = pd.DataFrame(columns=index)
# concatenate
try:
self._dataframe = pd.concat([df_space, df_data], axis=1)
except ValueError:
self._dataframe = pd.DataFrame()
# I/O formaters
self._pformater = formater(pformat)
self._fformater = formater(fformat)
self.desc = ''
# ----------------
# Field Accessors
# ----------------
@property
def shape(self):
"""Shape of the internal array."""
return self._dataframe.shape
@property
def plabels(self):
"""List of space feature labels.
:returns: a list of column labels, ordered the same as the underlying array.
:rtype: list(str)
"""
try:
index = self._dataframe['space'].columns
except KeyError:
return []
else:
uniq, pos = np.unique(index.codes[0], return_index=True)
uniq = uniq[np.argsort(pos)]
return list(index.levels[0][uniq])
@property
def flabels(self):
"""List of data feature labels.
:returns: a list of column labels, ordered the same as the underlying array.
:rtype: list(str)
"""
try:
index = self._dataframe['data'].columns
except KeyError:
return []
else:
uniq, pos = np.unique(index.codes[0], return_index=True)
uniq = uniq[np.argsort(pos)]
return list(index.levels[0][uniq])
@property
def psizes(self):
"""Sizes of space features.
:returns: the number of components of each feature.
:rtype: list(int)
"""
try:
index = self._dataframe['space'].columns
except KeyError:
return []
else:
_, sizes = np.unique(index.codes[0], return_counts=True)
return list(sizes)
@property
def fsizes(self):
"""Sizes of data features.
:returns: the number of components of each feature.
:rtype: list(int)
"""
try:
index = self._dataframe['data'].columns
except KeyError:
return []
else:
_, sizes = | np.unique(index.codes[0], return_counts=True) | numpy.unique |
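# Illustrative usage sketch (not part of the original module): building a Sample from
# the constructor shown above and reading back shape and labels. Only behaviour visible
# in this excerpt is relied upon; anything beyond it is an assumption.
def _example_sample_usage():
    space = [[0.0, 1.0], [0.5, 2.0]]   # 2 points, 2 parameters
    data = [[10.0], [20.0]]            # 1 scalar feature per point
    sample = Sample(space=space, data=data, plabels=['x1', 'x2'], flabels=['f'])
    return sample.shape, sample.plabels, sample.flabels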
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" matrix operator """
from copy import deepcopy
from functools import reduce
import logging
import numpy as np
from scipy import sparse as scisparse
from scipy import linalg as scila
from qiskit import QuantumCircuit # pylint: disable=unused-import
from qiskit.aqua import AquaError
from .base_operator import BaseOperator
logger = logging.getLogger(__name__)
class MatrixOperator(BaseOperator):
"""
Operators relevant for quantum applications
Note:
For grouped paulis representation, all operations will always convert
it to paulis and then convert it back.
(It might be a performance issue.)
"""
def __init__(self, matrix, basis=None, z2_symmetries=None, atol=1e-12, name=None):
"""
Args:
matrix (numpy.ndarray or scipy.sparse.csr_matrix):
a 2-D sparse matrix represents operator (using CSR format internally)
basis (list[tuple(object, [int])], optional): the grouping basis, each element is a
tuple composed of the basis
and the indices to paulis which are
belonged to that group.
e.g., if tpb basis is used, the object
will be a pauli.
by default, the group is equal to
non-grouping, each pauli is its own basis.
z2_symmetries (Z2Symmetries): represent the Z2 symmetries
atol (float): atol
name (str): name
"""
super().__init__(basis, z2_symmetries, name)
if matrix is not None:
matrix = matrix if scisparse.issparse(matrix) else scisparse.csr_matrix(matrix)
matrix = matrix if scisparse.isspmatrix_csr(matrix) else matrix.to_csr(copy=True)
self._matrix = matrix
self._atol = atol
@property
def atol(self):
""" return atol """
return self._atol
@atol.setter
def atol(self, new_value):
""" sets atol """
self._atol = new_value
def add(self, other, copy=False):
""" add """
out = self.copy() if copy else self
out._matrix += other._matrix
return out
def sub(self, other, copy=False):
""" sub """
out = self.copy() if copy else self
out._matrix -= other._matrix
return out
def __add__(self, other):
"""Overload + operation"""
return self.add(other, copy=True)
def __iadd__(self, other):
"""Overload += operation"""
return self.add(other, copy=False)
def __sub__(self, other):
"""Overload - operation"""
return self.sub(other, copy=True)
def __isub__(self, other):
"""Overload -= operation"""
return self.sub(other, copy=False)
def __neg__(self):
"""Overload unary - """
out = self.copy()
out._matrix *= -1.0
return out
def __eq__(self, other):
"""Overload == operation"""
return np.all(self._matrix == other.matrix)
def __str__(self):
"""Overload str()"""
curr_repr = 'matrix'
length = "{}x{}".format(2 ** self.num_qubits, 2 ** self.num_qubits)
ret = "Representation: {}, qubits: {}, size: {}".format(curr_repr, self.num_qubits, length)
return ret
def copy(self):
"""Get a copy of self."""
return deepcopy(self)
def chop(self, threshold=None, copy=False):
"""
        Eliminate the real and imaginary part of coeff in each pauli by `threshold`.
        If pauli's coeff is less than `threshold` in both real and imaginary parts,
        the pauli is removed.
To align the internal representations, all available representations are chopped.
The chopped result is stored back to original property.
Note: if coeff is real-only, the imag part is skipped.
Args:
threshold (float): threshold chops the paulis
copy (bool): copy or self
Returns:
MatrixOperator: self or copy
"""
threshold = self._atol if threshold is None else threshold
def chop_real_imag(coeff):
temp_real = coeff.real if np.absolute(coeff.real) >= threshold else 0.0
temp_imag = coeff.imag if | np.absolute(coeff.imag) | numpy.absolute |
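# Illustrative usage sketch (not part of the original module): combining two small
# MatrixOperator instances with the overloaded +/- defined above. The 2x2 matrices are
# arbitrary assumptions.
def _example_matrix_operator_arithmetic():
    a = MatrixOperator(matrix=np.array([[1.0, 0.0], [0.0, -1.0]]))  # Z-like matrix
    b = MatrixOperator(matrix=np.array([[0.0, 1.0], [1.0, 0.0]]))   # X-like matrix
    total = a + b        # copies, via add(other, copy=True)
    diff = a - b         # copies, via sub(other, copy=True)
    return total, diff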
"""
Classes to perform actions on simulation trajectories
"""
from __future__ import division, print_function, absolute_import
import os
from collections import namedtuple
import MDAnalysis as md
import MDAnalysis.core.AtomGroup as AtomGroup
import MDAnalysis.analysis.align as align
import MDAnalysis.lib.util as mdutil
import MDAnalysis.lib.mdamath as mdmath
import numpy as np
try :
import pyvoro
except:
pass
from scipy.spatial.distance import cdist
import sklearn.mixture as mixture
from simanalysis import pbc
from simanalysis.pbc import make_whole_xyz, unwrap_vector
from simanalysis.groups import read_groups
from simanalysis.utils import resanallib, AnalysisGrid
MDRecord = namedtuple("MDRecord",["time","value"])
ResidueAtoms = namedtuple("ResidueAtoms",["first","last"])
class TrajectoryAction(object):
"""
Base class for actions that are called by a TrajectoryProcessor
object. Classes that wants to implement a particular action
should inherit from this.
"""
def __init__(self, processor):
self.processor = processor
self.dosubsample = False
processor.append_action(self)
@staticmethod
def descr() :
return "This is generic description"
@classmethod
def command_name(cls) :
name = cls.__name__.lower()
if name.endswith("analysis") :
name = name[:-8]
return name
def add_arguments(self, parser):
"""
Function that is called to add action-specific arguments
Arguments
---------
parser : argparse.ArgumentParser
the parser to add command-line arguments to
"""
pass
def setup(self, args):
"""
Function that is called after the processor has parsed the command-line
arguments.
Arguments
---------
args : argparse.Namespace
the parsed arguments
"""
def process(self):
"""
Main processing function called at each trajectory timestep
"""
pass
def subsample(self):
"""
Function called occasionally if subsampling is turned on
"""
pass
def finalize(self):
"""
Function that is called after all timesteps has been processed
"""
pass
def _write_records(self, postfix='', headers=None):
"""
Helper routine to write out a list of MDRecord to disc
"""
if self.records :
with open(self.out+postfix,'w') as f :
if headers is not None :
f.write("#"+"\t".join(headers)+"\n")
for entry in self.records:
if isinstance(entry.value,float) or isinstance(entry.value,np.float32):
f.write("%.0f\t%.3f\n"%(entry.time,entry.value))
elif isinstance(entry.value,int) or isinstance(entry.value,np.int32):
f.write("%.0f\t%d\n"%(entry.time,entry.value))
else:
f.write("%.0f\t%s\n"%(entry.time,"\t".join("%.3f"%v for v in entry.value)))
class CenterWholeAlign(TrajectoryAction):
"""
Class to make MD snapshots whole over periodic boxes and to centre and
align proteins.
Attributes
----------
protsel : MDAnalysis.AtomGroup
the protein selection
refuni : MDAnalyis.Universe
the reference universe used for alignment
residue_atoms : list of integer tuples
the atom numbers of all residues excluding proteins
residues : list of MDAnalysis.AtomGroup
the residues in the universe excluding proteins
records : list of MDRecords
the RMSD record at each processes snapshot
writer : MDAnalysis.Writer
the output trajectory writer
"""
@staticmethod
def descr() :
return "Make snapshots whole and centre and align proteins"
def add_arguments(self, parser):
        parser.add_argument('--bbmask',help="the selection mask for backbone",default="name CA")
        parser.add_argument('--pmask',help="the selection mask for protein",default="protein")
parser.add_argument('-o','--out',help="the output",default="centerwhole")
parser.add_argument('--noalign',action="store_true",help="turns off alignment",default=False)
parser.add_argument('--nocenter',action="store_true",help="turns off centering",default=False)
parser.add_argument('--nowhole',action="store_true",help="turns off making whole",default=False)
def setup(self, args):
self.refuni = md.Universe(self.processor.args.struct)
self.protsel = self.processor.universe.select_atoms(args.pmask)
if len(self.protsel) == 0 :
self.nocenter = True
self.noalign = True
else:
self.nocenter = args.nocenter
self.noalign = args.noalign
self.nowhole = args.nowhole
self.residues = []
self.residue_atoms = []
for res in self.processor.universe.select_atoms("not "+args.pmask).residues:
if len(res.atoms) > 1 :
self.residues.append(res)
self.residue_atoms.append(ResidueAtoms(res.atoms[0].index,res.atoms[-1].index))
self.records = []
self.writer = md.Writer(args.out,
self.processor.universe.trajectory.n_atoms)
self.out = args.out
self.bbmask = args.bbmask
def process(self):
if not self.nowhole :
if len(self.protsel) > 0:
xyz = pbc.make_whole_xyz(self.protsel.positions,self.processor.currbox)
self.protsel.positions = xyz
for res in self.residues :
xyz = pbc.make_whole_xyz(res.atoms.positions,self.processor.currbox)
res.atoms.positions = xyz
if not self.nocenter :
self._center()
if not self.noalign :
rmsd = align.alignto(self.processor.universe, self.refuni,
select=self.bbmask)[1]
self.records.append(MDRecord(self.processor.currtime,rmsd))
self.writer.write(self.processor.currsnap)
def finalize(self):
"""
Write out the RMSDs to disc and close the output trajectory
"""
self._write_records(postfix="_rmsd.txt")
try :
self.writer.close_trajectory()
except :
pass
def _center(self) :
xyz = self.processor.currsnap._pos
#com1 = xyz[self.protsel[0].number:self.protsel[-1].number+1].mean(axis=0)
com1 = self.protsel.center_of_geometry()
for residue in self.residue_atoms :
com2 = xyz[residue.first:residue.last+1].mean(axis=0)
dr = pbc.unwrap_vector(com1 - com2, self.processor.currbox)
xyz[residue.first:residue.last+1] = xyz[residue.first:residue.last+1] + dr
delta = com1 - self.processor.currbox/2.0
self.processor.currsnap._pos = xyz - delta
MDGroupSelection = namedtuple("MDGroupSelection",["atomgroup", "indices", "transmat"])
class ChainOrderAnalysis(TrajectoryAction):
"""
Class to analyse chain order parameters during a trajectory
Attributes:
-----------
normal : numpy ndarray
the normal of the membrane, assumed to be z-axis
out : string
the output filename
selections : list of MDAnalysis.AtomGroup
the selections
records : list of MDRecord
the chain orders at each timestep
"""
@staticmethod
def descr() :
return "Analyze chain order parameters"
def add_arguments(self, parser):
parser.add_argument('--selections',nargs="+", help="the chains")
parser.add_argument('--analysis',choices=["CC","CH"], help="the type of analysis C-C or C-H", default="CC")
parser.add_argument('--groups', help="group definitions for pseudo-atom calculation")
parser.add_argument('--gridout', help="the prefix for the filename of a 2D grid")
        parser.add_argument('--protmask',help="the selection mask for protein residues")
        parser.add_argument('--pmask',help="the selection mask for phosphor atoms",default="name P")
parser.add_argument('-o', '--out', help="the output prefix", default="order")
def setup(self, args):
def _get_h(atomgrp):
for atom2 in atomgrp[0].bonded_atoms :
if atom2.mass < 5.0 :
return self.processor.universe.select_atoms("resname %s and name %s"%(atomgrp[0].resname,atom2.name))
raise Exception("Could not find any H atom bonded to %s in %s"%(atomgrp[0].name,atomgrp[0].resname))
def _enumerateatoms(resname, atomstr) :
lipid = self.processor.universe.select_atoms("resname %s"%resname)[0].residue
base = atomstr[:-1]
atomi = int(atomstr[-1])
lst = []
while True :
try :
name = "%s%d"%(base,atomi)
dummy = lipid[name]
lst.append(name)
atomi += 1
except :
break
return lst
def _expandlist(liststr):
l, r = liststr.split("..")
i = l.find("(")
start = int(l[i+1:])
l = l[:i]
i = r.find(")")
end = int(r[:i])
r = r[i+1:]
return ["%s%d%s"%(l,i,r) for i in range(start,end+1)]
self.headers = ["Time"]
self.selheaders = []
self.selections = []
self.analtype = args.analysis
if self.analtype == "CH" :
self.hselections = []
self.resgroups = None
if args.groups is not None:
if self.analtype == "CH" :
raise Exception("Cannot perform C-H analysis on pseudo-atoms")
self.resgroups = read_groups(args.groups)
for selin in args.selections:
resname, chainlist = selin.split(":")
if self.resgroups is not None:
if resname not in self.resgroups:
raise Exception("Cannot find %s in groups spec."%resname)
pseudoatoms = [group.name for group in self.resgroups[resname].groups]
if chainlist.find("-") > -1:
atomlist = chainlist.split("-")
elif chainlist.find("..") > -1:
atomlist = _expandlist(chainlist)
elif chainlist.startswith("@") :
atomlist = _enumerateatoms(resname, chainlist[1:])
else:
raise Exception("Atom list need be specified with '-' or with expansion '(..)'")
if self.resgroups is None:
atomsels = [self.processor.universe.select_atoms("resname %s and name %s"%(resname,atom))
for atom in atomlist]
print("%s (%s) - %d atoms and %d atoms in first selection"% \
(resname, ",".join(atomlist), len(atomlist), len(atomsels[0])))
for atomgrp, atom in zip(atomsels[1:], atomlist[1:]):
if len(atomgrp) != len(atomsels[0]):
raise Exception("Selection for %s is different in length than the first selection"%atom)
self.selections.append(atomsels)
else:
for atom in atomlist:
if atom not in pseudoatoms :
raise Exception("Could not find selected atom %s in the group spec."%atom)
# Select all atoms for the selected residue, the coordinates
# will be transformed to pseudo-atoms
atomsel = self.processor.universe.select_atoms("resname %s"%resname)
atomnames = [atom.name for atom in atomsel.residues[0].atoms]
ngroups = len(self.resgroups[resname].groups)
natoms = len(atomnames)
nres = len(atomsel.residues)
# Create the pseudo atom indices
indices0 = [self.resgroups[resname].indices(atom) for atom in atomlist]
indices = [[i0[0]+ngroups*i for i in range(nres)] for i0 in indices0]
                # Create the transformation matrix by replicating the one for the first residue
transmat0 = self.resgroups[resname].transmat(atomnames)
transmat = np.zeros([ngroups*nres,natoms*nres])
for i in range(nres):
transmat[i*ngroups:(i+1)*ngroups,i*natoms:(i+1)*natoms] = transmat0
self.selections.append(MDGroupSelection(atomsel, indices, transmat))
print("%s (%s) - %d atoms and %d atoms in first selection"% \
(resname, ",".join(atomlist), len(atomlist), len(indices[0])))
self.headers.extend(["%s/%s"%(resname, atom) for atom in atomlist])
self.selheaders.append(["%s/%s"%(resname, atom) for atom in atomlist])
if self.analtype == "CH":
hatomsels = [_get_h(atomgrp) for atomgrp in atomsels]
self.hselections.append(hatomsels)
for atomgrp, atom in zip(hatomsels[1:], atomlist[1:]):
if len(atomgrp) != len(hatomsels[0]):
raise Exception("H-selection for %s is different in length than the first selection"%atom)
self.out = args.out
# Assumes that the normal is along the z-axis
self.normal = np.array([0.0,0.0,1.0])
self.records = []
self.gridout = args.gridout
self.phosphorsel = None
if self.gridout is not None :
bounds = np.asarray([[0.0, 0.0, 0.0],self.processor.universe.dimensions[:3]])
self.grid_low = AnalysisGrid(bounds)
self.grid_upp = AnalysisGrid(bounds)
self.phosphorsel = self.processor.universe.select_atoms(args.pmask)
if args.protmask is not None :
self.protsel = self.processor.universe.select_atoms(args.protmask)
self.grid_prot = AnalysisGrid(bounds)
self.protone = np.ones(len(self.protsel))
else :
self.protsel = None
def process(self):
mid = None
if self.gridout is not None :
mid = self.phosphorsel.center_of_geometry()
if self.protsel is not None :
self.grid_prot.accumulate(self.protsel.positions-mid, self.protone)
orders = []
if self.analtype == "CC":
if self.resgroups is None:
for selection in self.selections :
for a1, a2 in zip(selection[:-1],selection[1:]):
orders.append(self._calc_order(a1.positions,
a2.positions, self.normal, mid))
else:
if self.processor.nprocessed == 1:
f = open(self.out+"_first_pseudo.xyz", "w")
for selection in self.selections :
xyz = np.dot(selection.transmat, selection.atomgroup.positions)
if self.processor.nprocessed == 1:
for pos in xyz:
f.write("c %.3f %.3f %.3f\n"%(pos[0], pos[1], pos[2]))
for i1, i2 in zip(selection.indices[:-1], selection.indices[1:]):
orders.append(self._calc_order(xyz[i1,:],
xyz[i2,:], self.normal, mid))
if self.processor.nprocessed == 1:
f.close()
elif self.analtype == "CH":
for cselection, hselection in zip(self.selections, self.hselections):
for a1, a2 in zip(cselection, hselection):
orders.append(self._calc_order(a1.positions,
a2.positions, self.normal, mid))
self.records.append(MDRecord(self.processor.currtime, orders))
def _calc_order(self, a1, a2, norm, mid):
# Atom2 - Atom1
vec = a2 - a1
# Projection with normal
proj = np.multiply(vec,norm).sum(axis=1)**2 / np.sum(vec**2,axis=1)
# Discretize on a grid
if self.gridout is not None :
sel_low = self.phosphorsel.positions[:,2] < mid[2]
sel_upp = np.logical_not(sel_low)
coords_upp = self.phosphorsel.positions[sel_upp,:]
coords_low = self.phosphorsel.positions[sel_low,:]
self.grid_low.accumulate(coords_low-mid, proj[sel_low])
self.grid_upp.accumulate(coords_upp-mid, proj[sel_upp])
# return order parameter
return np.abs(0.5*(3.0*proj.mean()-1))
def finalize(self):
self._write_records(postfix="_dt.txt", headers=self.headers)
data = np.asarray([r.value for r in self.records])
av = data.mean(axis=0)
std = data.std(axis=0)
offset = 0
selavs = []
selstds = []
fac = -1 if self.analtype == "CC" else 0
for heads in self.selheaders:
selavs.append(av[offset:offset+len(heads)+fac])
selstds.append(std[offset:offset+len(heads)+fac])
offset += len(heads)+fac
maxatm = max([len(heads) for heads in self.selheaders])+fac
with open(self.out+".txt", "w") as f :
f.write("".join(["\t%s\t\t"%heads[0].split("/")[0] for heads in self.selheaders])+"\n")
for i in range(maxatm):
for j in range(len(self.selheaders)):
if i < len(self.selheaders[j]) :
f.write("%s\t%.3f\t%.3f\t"%(self.selheaders[j][i].split("/")[1],
selavs[j][i],selstds[j][i]))
else:
f.write(" \t \t \t")
f.write("\n")
for avs in selavs:
f.write("Av\t%.3f\t%.3f\t"%(avs.mean(),avs.std()/np.sqrt(avs.shape[0])))
f.write("\n")
if self.gridout is not None :
def order(mat) :
return np.abs(0.5*(3.0*mat-1))
self.grid_low.average(func=order)
self.grid_low.write(self.gridout+"_low.dat")
self.grid_upp.average(func=order)
self.grid_upp.write(self.gridout+"_upp.dat")
if self.protsel is not None :
self.grid_prot.average()
self.grid_prot.write(self.gridout+"_prot.dat")
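# Illustrative sketch (not part of the original module): the chain order parameter
# evaluated in ChainOrderAnalysis._calc_order above, written out for two plain
# coordinate arrays; S = |0.5*(3*<cos^2 theta> - 1)| with theta measured against the
# membrane normal. The default normal is the z-axis, as assumed in the class.
def _example_cc_order_parameter(a1, a2, normal=np.array([0.0, 0.0, 1.0])):
    vec = a2 - a1                                               # one bond vector per chain
    cos2 = np.multiply(vec, normal).sum(axis=1)**2 / np.sum(vec**2, axis=1)
    return np.abs(0.5 * (3.0 * cos2.mean() - 1.0))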
class IredAnalysis(TrajectoryAction):
"""
Analysis class for iRED
Attributes
----------
atm1 : MDAnalysis.AtomSelection
the first atom making up the iRED vector
atm2 : MDAnalysis.AtomSelection
the second atom making up the iRED vector
mat : numpy.ndarray
the built-up of the correlation matrix
s2list : list
the S2 order parameter for each vector at each subsample point
outname : string
the name of the output file
processor : TrajectoryProcessor object
the trajectory processor calling this analysis
"""
@staticmethod
def descr() :
return "iRED analysis of proteins"
def add_arguments(self, parser):
parser.add_argument('--atoms',nargs=2,help="the atom names making the vectors",default=["N","H"])
        parser.add_argument('--pmask',help="the selection mask for protein",default="protein")
parser.add_argument('-o','--out',help="the output name",default="s2.txt")
parser.add_argument('--resoffset',type=int,help="the residue offset",default=0)
parser.add_argument('--uselib',choices=["no","dict","me"],help="if to use library vectors",default="no")
def setup(self,args):
protsel = self.processor.universe.select_atoms(args.pmask)
self.uselib = args.uselib
if self.uselib == "no":
self.atm2 = protsel.select_atoms("name "+args.atoms[1])
self.atm1 = protsel.select_atoms("name "+args.atoms[0]+
" and byres name "+args.atoms[1])
elif self.uselib in resanallib :
lib = resanallib[self.uselib]
atm1 = []
atm2 = []
for res in self.processor.universe.select_atoms("protein").residues:
if res.name not in lib :
continue
for atompairs in lib[res.name]:
atm1.append(res[atompairs[0]])
atm2.append(res[atompairs[1]])
self.atm2 = AtomGroup.AtomGroup(atm2)
self.atm1 = AtomGroup.AtomGroup(atm1)
self.mat = np.zeros([len(self.atm1),len(self.atm1)])
self.s2list = []
self.outname = args.out
self.dosubsample = True
self.resoffset = args.resoffset
def process(self):
"""
Building up the correlation matrix, called at each MD snapshot
"""
v1 = self.atm2.positions-self.atm1.positions
vlen = 1.0 / np.sqrt((v1*v1).sum(axis=1))
"""mat2 = np.zeros(mat.shape)
for i in range(nvec):
for j in range(nvec):
mat2[j,i] = np.sum(v1[i]*v1[j])*(vlen[i]*vlen[j]) """
xx1,xx2 = np.meshgrid(v1[:,0],v1[:,0])
yy1,yy2 = np.meshgrid(v1[:,1],v1[:,1])
zz1,zz2 = np.meshgrid(v1[:,2],v1[:,2])
ll1,ll2 = np.meshgrid(vlen,vlen)
mat0 = (xx1*xx2+yy1*yy2+zz1*zz2)*(ll1*ll2)
self.mat += 3.0*mat0*mat0-1
def subsample(self):
"""
Calculating the S2 order parameters and then zero the correlation matrix
"""
self.mat = 0.5*(self.mat / float(self.processor.subsamples))
# Calculating and sorting the eigenvalues and eigenvectors
        evals,evecs = np.linalg.eig(self.mat)
        idx = evals.argsort()[::-1]
        evals = evals[idx]
        evecs = evecs[:,idx]
        prod = evecs*evecs
        s2 = np.array([1.0-np.sum(evals[5:]*prod[i,5:]) for i in range(prod.shape[0])])
self.s2list.append(s2)
self.mat = np.zeros(self.mat.shape)
def finalize(self):
"""
Write out the order parameters to disc
"""
with open(self.outname,"w") as f :
self.s2list = np.asarray(self.s2list)
frmstr = "%.5f "*self.s2list.shape[0] # ///frmstr%tuple(rs2)
prevres = None
prevatom = None
reslist = []
for i,(atm,rs2) in enumerate(zip(self.atm1,self.s2list.T)):
if self.uselib != "dict":
f.write("%s %d %.5f\n"%(atm.resname,atm.resnum+self.resoffset,rs2.mean()))
#f.write("%s %d %s\n"%(atm.resname,atm.resnum+self.resoffset," ".join("%.5f"%v for v in rs2)))
else :
if reslist and atm.resnum != prevres:
av = np.asarray(reslist).mean()
f.write("%s %d %.5f\n"%(prevatom.resname,prevatom.resnum+self.resoffset,av))
reslist = []
reslist.append(rs2.mean())
prevres = atm.resnum
prevatom = atm
if self.uselib == "dict":
av = np.asarray(reslist).mean()
f.write("%s %d %.5f\n"%(atm.resname,atm.resnum+self.resoffset,av))
class MemBulkAnalysis(TrajectoryAction) :
@staticmethod
def descr() :
return "Analyze the bulk of membrane simulations"
def add_arguments(self, parser):
        parser.add_argument('--pmask',help="the selection mask for phosphor atoms",default="name P")
        parser.add_argument('--wmask',help="the selection mask for water atoms",default="name OH2")
        parser.add_argument('--smask',help="the selection mask for solute atoms")
parser.add_argument('--sconc',type=float, help="the target solute concentration",default=1.0)
parser.add_argument('--svol',type=float, help="the solute number volume",default=1.0)
parser.add_argument('--wvol',type=float, help="the water number volume",default=0.0181)
def setup(self, args):
self.phosphorsel = self.processor.universe.select_atoms(args.pmask)
self.watersel = self.processor.universe.select_atoms(args.wmask)
self.allsel = self.processor.universe.select_atoms("all")
print("Number of phosphor (%d) and water (%d) atoms"%(
len(self.phosphorsel), len(self.watersel)))
if args.smask is not None :
self.solute = self.processor.universe.select_atoms(args.smask)
print("Number of solute atoms = %d"%len(self.solute))
else :
self.solute = None
self.nphosphor = 1.0 / float(len(self.phosphorsel))
self.nwater = 1.0 / float(len(self.watersel))
# Setup edges to cover the entire simulation box
zpos = self.processor.universe.coord._pos[:,2] - self.allsel.positions[:,2].mean()
self.resolution = 0.25
self.edges = np.arange(zpos.min(),zpos.max()+self.resolution,self.resolution)
self.zvals = 0.5 * (self.edges[:-1] + self.edges[1:]) * 0.1
self.pdensity = np.zeros(self.edges.shape[0]-1)
self.wdensity = np.zeros(self.edges.shape[0]-1)
self.wdensity_now = np.zeros(self.edges.shape[0]-1)
if self.solute is not None :
self.sdensity_now = np.zeros(self.edges.shape[0]-1)
self.sconc = args.sconc
self.svol = args.svol
self.wvol = args.wvol
def process(self) :
zpos = self.phosphorsel.positions[:,2] - self.allsel.positions[:,2].mean()
hist, b = np.histogram(zpos, bins=self.edges)
self.pdensity += hist
zpos = self.watersel.positions[:,2] - self.allsel.positions[:,2].mean()
hist, b = np.histogram(zpos, bins=self.edges)
self.wdensity += hist
self.wdensity_now = hist
if self.solute is not None :
zpos = self.solute.positions[:,2] - self.allsel.positions[:,2].mean()
hist, b = np.histogram(zpos, bins=self.edges)
            self.sdensity_now = hist
def finalize(self):
# Calculate how many water molecules in the bulk
firsti, lasti = self.density_intercept(self.wdensity, self.pdensity)
nbulkwat = int(np.round(self.wdensity_now[:firsti].sum()
+self.wdensity_now[lasti+1:].sum()))
print("Nwat\t%d"%nbulkwat)
# Calculate how many solutes there are outside the membrane
if self.solute is not None :
nbulksol = int(np.round(self.sdensity_now[:firsti].sum()
+self.sdensity_now[lasti+1:].sum()))
else :
nbulksol = 0
print("Nsol\t%d"%nbulksol)
ntot = nbulkwat + nbulksol
ntarget = np.round(ntot*self.wvol/(1.0/self.sconc+self.wvol-self.svol))
nadd = ntarget-nbulksol
if nadd % 2 == 0 :
nadd = (0.5*nadd , 0.5*nadd)
else :
nadd = (np.ceil(0.5*nadd) , np.floor(0.5*nadd))
print("Nadd\t%d\t%d"%nadd)
def density_intercept(self, dens1, dens2) :
"""
"""
n1 = np.sum(dens1)
n2 = np.sum(dens2)
fi = 0
while dens1[fi] / n1 == 0.0 or dens2[fi] / n2 < dens1[fi] / n1 :
fi += 1
li = len(dens1) - 1
while dens1[li] / n1 == 0.0 or dens2[li] / n2 < dens1[li] / n1 :
li -= 1
return fi, li
class MemDensAnalysis(MemBulkAnalysis) :  # reuses density_intercept() from MemBulkAnalysis
@staticmethod
def descr() :
return "Analyze properties based on the membrane density"
def add_arguments(self, parser):
        parser.add_argument('--pmask',help="the selection mask for phosphor atoms",default="name P")
        parser.add_argument('--wmask',help="the selection mask for water atoms",default="name OH2")
        parser.add_argument('--smask',help="the selection mask for solute atoms")
parser.add_argument('-o','--out',help="the output prefix",default="memdens")
def setup(self, args):
self.dosubsample = True
self.out = args.out
self.phosphorsel = self.processor.universe.select_atoms(args.pmask)
self.watersel = self.processor.universe.select_atoms(args.wmask)
self.allsel = self.processor.universe.select_atoms("all")
print("Number of phosphor (%d) and water (%d) atoms"%(
len(self.phosphorsel), len(self.watersel)))
if args.smask is not None :
self.solute = self.processor.universe.select_atoms(args.smask)
print("Number of solute atoms = %d"%len(self.solute))
else :
self.solute = None
self.nphosphor = 1.0 / float(len(self.phosphorsel))
self.nwater = 1.0 / float(len(self.watersel))
# Setup edges to cover the entire simulation box
zpos = self.processor.universe.coord._pos[:,2] - self.allsel.positions[:,2].mean()
self.resolution = 0.25
self.edges = np.arange(zpos.min(),zpos.max()+self.resolution,self.resolution)
self.zvals = 0.5 * (self.edges[:-1] + self.edges[1:]) * 0.1
self.ppos_curr = []
self.pdensity_curr = np.zeros(self.edges.shape[0]-1)
self.wdensity_curr = np.zeros(self.edges.shape[0]-1)
self.pdensity = []
self.wdensity = []
if self.solute is not None :
self.sdensity_curr = np.zeros(self.edges.shape[0]-1)
self.solute_snapshots = 0
self.sdensity = []
self.records = []
def process(self) :
zpos = self.phosphorsel.positions[:,2] - self.allsel.positions[:,2].mean()
hist, b = np.histogram(zpos, bins=self.edges)
self.pdensity_curr += hist
self.ppos_curr.extend(zpos)
zpos = self.watersel.positions[:,2] - self.allsel.positions[:,2].mean()
hist, b = np.histogram(zpos, bins=self.edges)
self.wdensity_curr += hist
if self.solute is not None :
zpos = self.solute.positions[:,2] - self.allsel.positions[:,2].mean()
hist, b = np.histogram(zpos, bins=self.edges)
self.sdensity_curr += hist
self.solute_snapshots += 1
def subsample(self) :
# Calculate D_hh
model = mixture.GaussianMixture(n_components=2)
model.fit(np.asarray(self.ppos_curr).reshape(-1,1))
dhh = np.abs(model.means_[1][0]-model.means_[0][0]) * 0.1
self.ppos_curr = []
# Calculate intercept of water and phosphor density,
# and from that the membrane volume
firsti, lasti = self.density_intercept(self.wdensity_curr, self.pdensity_curr)
firstz = self.zvals[firsti]
lastz = self.zvals[lasti]
memfrac = (lastz - firstz) / (self.processor.currbox[2] * 0.1)
# Calculate how many solutes there are inside the membrane
if self.solute is not None :
nfreq = 1 / float(self.processor.freq)
solute_dens = self.sdensity_curr / float(self.solute_snapshots)
ninside = int(np.round(solute_dens[firsti:lasti+1].sum()))
self.records.append(MDRecord(self.processor.currtime, [dhh, lastz - firstz,
self.processor.currbox[2] * 0.1, memfrac, ninside]))
else :
self.records.append(MDRecord(self.processor.currtime, [dhh, lastz - firstz,
self.processor.currbox[2] * 0.1, memfrac]))
        # Store away the accumulated densities and zero them
self.pdensity.append(self.pdensity_curr)
self.wdensity.append(self.wdensity_curr)
self.pdensity_curr = np.zeros(self.edges.shape[0]-1)
self.wdensity_curr = np.zeros(self.edges.shape[0]-1)
if self.solute is not None :
self.sdensity.append(self.sdensity_curr)
self.sdensity_curr = np.zeros(self.edges.shape[0]-1)
self.solute_snapshots = 0
def finalize(self):
def _write_density(density, scaling, postfix) :
density = np.asarray(density) * scaling
with open(self.out+postfix, "w") as f :
for z, av, err in zip(self.zvals, density.mean(axis=0),
density.std(axis=0)/np.sqrt(density.shape[0])) :
f.write("%.3f %.3f %.3f\n"%(z, av, err))
_write_density(self.pdensity, self.nphosphor, "_pdens.dat")
_write_density(self.wdensity, self.nwater, "_wdens.dat")
if self.solute is not None :
_write_density(self.sdensity, 1.0 / len(self.solute), "_sdens.dat")
self._write_records(postfix="_dt.txt")
vals = np.asarray([entry.value for entry in self.records])
with open(self.out+".txt", "w") as f :
f.write(" ".join("%.3f %.3f"%(av, err) for av, err in zip(vals.mean(axis=0),
vals.std(axis=0)/np.sqrt(vals.shape[0])))+"\n")
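# Illustrative sketch (not part of the original module): the D_hh estimate used in
# MemDensAnalysis.subsample above, i.e. fitting a two-component Gaussian mixture to
# phosphate z-positions and taking the distance between the component means. The
# synthetic leaflet positions (in Angstrom) are assumptions.
def _example_dhh_from_gmm():
    zpos = np.concatenate([np.random.normal(-19.0, 2.0, 200),
                           np.random.normal(19.0, 2.0, 200)])
    model = mixture.GaussianMixture(n_components=2)
    model.fit(zpos.reshape(-1, 1))
    return np.abs(model.means_[1][0] - model.means_[0][0]) * 0.1   # nm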
class MempropAnalysis(TrajectoryAction):
@staticmethod
def descr() :
return "Analyze common membrane properties"
def add_arguments(self, parser):
        parser.add_argument('--pmask',help="the selection mask for phosphor atoms",default="name P")
        parser.add_argument('--lipidmask',help="the selection mask for lipid residues",default="resname POPC")
        parser.add_argument('--watmask',help="the selection mask for water residues",default="resname SOL")
parser.add_argument('--watvol',type=float,help="the volume of a water molecule in nm3",default=0.0306)
parser.add_argument('--gridout', help="the prefix for the filename of a 2D grid")
        parser.add_argument('--protmask',help="the selection mask for protein residues")
parser.add_argument('-o','--out',help="the output prefix",default="memprop")
def setup(self,args):
self.out = args.out
self.phosphorsel = self.processor.universe.select_atoms(args.pmask)
self.lipidsel = self.processor.universe.select_atoms(args.lipidmask)
watsel = self.processor.universe.select_atoms(args.watmask)
self.nlipid = len(self.lipidsel.residues)
self.nwat = len(watsel.residues)
nphosph = len(self.phosphorsel.residues)
print("Number of lipids (%d), waters (%d) and phosphor atoms (%d)"%(self.nlipid,self.nwat,nphosph))
self.watvol = args.watvol
if self.nlipid == 0 or self.nwat == 0 or nphosph == 0 :
raise Exception("Either number of lipids (%d), water (%d) or phosphor atoms (%d) is zero"%(self.nlipid,self.nwat,nphosph))
self.apllist = []
self.vpllist = []
# Setup edges to cover the entire simulation box
zpos = self.processor.universe.coord._pos[:,2] - self.lipidsel.positions[:,2].mean()
self.resolution = 0.25
self.edges = np.arange(zpos.min(),zpos.max()+self.resolution,self.resolution)
self.density = np.zeros(self.edges.shape[0]+1)
# Setup arrays for RMSF calculations
self.sumcoords2 = np.zeros([nphosph,2])
self.sumcoords = np.zeros([nphosph,2])
self.records = []
self.gridout = args.gridout
if self.gridout is not None :
bounds = np.asarray([[0.0, 0.0, 0.0],self.processor.universe.dimensions[:3]])
self.grid_low = AnalysisGrid(bounds)
self.grid_upp = AnalysisGrid(bounds)
if args.protmask is not None :
self.protsel = self.processor.universe.select_atoms(args.protmask)
self.grid_prot = AnalysisGrid(bounds)
self.protone = np.ones(len(self.protsel))
else :
self.protsel = None
def process(self):
"""
Calculate APL, VPL and accumulate density of phosphor selection
"""
boxnm = self.processor.currbox / 10.0
self.apllist.append(boxnm[0]*boxnm[1]/float(self.nlipid/2))
self.vpllist.append((boxnm[0]*boxnm[1]*boxnm[2] -
self.watvol*self.nwat)/float(self.nlipid))
zpos = self.phosphorsel.positions[:,2] - self.lipidsel.positions[:,2].mean()
for lipdig in np.digitize(zpos,self.edges) :
self.density[lipdig] += 1
self.sumcoords += self.phosphorsel.positions[:,:2]
self.sumcoords2 += self.phosphorsel.positions[:,:2]*self.phosphorsel.positions[:,:2]
self.records.append(MDRecord(self.processor.currtime,[self.apllist[-1],self.vpllist[-1],self._calc_dhh(),self._calc_rmsf()]))
if self.gridout is not None :
mid = self.phosphorsel.center_of_geometry()
sel_low = self.phosphorsel.positions[:,2] < mid[2]
sel_upp = np.logical_not(sel_low)
coords_upp = self.phosphorsel.positions[sel_upp,:]
coords_low = self.phosphorsel.positions[sel_low,:]
self.grid_low.accumulate(coords_low-mid,
self._calc_zdist(coords_low, coords_upp))
self.grid_upp.accumulate(coords_upp-mid,
self._calc_zdist(coords_upp, coords_low))
if self.protsel is not None :
self.grid_prot.accumulate(self.protsel.positions-mid, self.protone)
def finalize(self):
"""
Calculate average APL and VPL as well as distance
between peaks in the phosphor density
"""
dhh = self._calc_dhh()
apl = np.asarray(self.apllist).mean()
vpl = np.asarray(self.vpllist).mean()
rmsf = self._calc_rmsf()
with open(self.out+".txt","w") as f :
f.write("%.3f\t%.3f\t%.3f\t%.3f\n"%(apl, vpl, dhh, rmsf))
self._write_records(postfix="_dt.txt")
if self.gridout is not None:
self.grid_low.average()
self.grid_low.write(self.gridout+"_low.dat")
self.grid_upp.average()
self.grid_upp.write(self.gridout+"_upp.dat")
if self.protsel is not None :
self.grid_prot.average()
self.grid_prot.write(self.gridout+"_prot.dat")
def _calc_dhh(self) :
mid = int(self.density.shape[0]/2)
dens_first = self.density[:mid]
dens_last = self.density[mid:]
max_first = np.argmax(dens_first)
max_last = np.argmax(dens_last)
return (max_last + mid - max_first) / 10.0 * self.resolution
def _calc_rmsf(self):
sumcoords = self.sumcoords / float(self.processor.nprocessed)
sumcoords2 = self.sumcoords2 / float(self.processor.nprocessed)
var = sumcoords2 - (sumcoords * sumcoords)
return var.sum(axis=1).mean()*0.01
def _calc_zdist(self, coords1, coords2) :
"""
Calculate the z-distance between all lipids in one leaflet and the closest lipid in the other leaflet
"""
dist = cdist(coords1[:,:2],coords2[:,:2],'sqeuclidean')
j = np.argmin(dist,axis=1)
return np.sqrt((coords2[j,2]-coords1[:,2])**2)*0.1
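# Illustrative sketch (not part of the original module): the area-per-lipid and
# volume-per-lipid expressions used in MempropAnalysis.process above, evaluated for an
# assumed box size and composition.
def _example_apl_vpl(boxnm=(6.5, 6.5, 9.0), nlipid=128, nwat=5000, watvol=0.0306):
    apl = boxnm[0] * boxnm[1] / float(nlipid / 2)                           # nm^2 per lipid
    vpl = (boxnm[0] * boxnm[1] * boxnm[2] - watvol * nwat) / float(nlipid)  # nm^3 per lipid
    return apl, vpl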
class MemVoronoiAnalysis(TrajectoryAction) :
@staticmethod
def descr() :
return "Voronoi analysis of a membrane patch"
def add_arguments(self, parser):
        parser.add_argument('--mask',nargs="+",help="the selection mask for the atoms to do analysis on")
parser.add_argument('--head',help="the name of the atom to determine leaflets",default="PO4")
parser.add_argument('-o','--out',help="the output",default="memvoro")
def setup(self, args):
self.atoms = self.processor.universe.select_atoms(
" or ".join("(%s)"%m for m in args.mask))
self.head = self.processor.universe.select_atoms("name %s"%args.head)
self.out = args.out
self.aplrecords = []
self.neighrecords = []
self.resnames = list(set([atom.resname for atom in self.atoms]))
self.respairs = []
for i, resname1 in enumerate(self.resnames):
for resname2 in self.resnames[i:]:
self.respairs.append(resname1+"-"+resname2)
def process(self):
midz = self.head.positions[:,2].mean()
lowsel = self.atoms.positions[:,2] < midz
uppsel = np.logical_not(lowsel)
celldim = [[0.0, self.processor.currbox[0]],
[0.0, self.processor.currbox[1]]]
try :
lareas, lneighbours = self._process_leaflet(self.atoms[lowsel], celldim)
uareas, uneighbours = self._process_leaflet(self.atoms[uppsel], celldim)
except:
pass
else:
areas = 0.01 * 0.5 * (lareas + uareas)
neighbours = 0.5 * (lneighbours + uneighbours)
self.aplrecords.append(MDRecord(self.processor.currtime,areas))
self.neighrecords.append(MDRecord(self.processor.currtime,neighbours))
def _process_leaflet(self, atoms, celldim):
cells = pyvoro.compute_2d_voronoi(atoms.positions[:,:2],
celldim, 2.0, periodic=[True,True])
# Calculate the area per each residue type
areas = {resname : 0 for resname in self.resnames}
nres = {resname : 0.0 for resname in self.resnames}
for atom, cell in zip(atoms, cells):
areas[atom.resname] += cell["volume"]
nres[atom.resname] += 1.0
areaout = np.asarray([areas[resname] / nres[resname] for resname in self.resnames])
# Calculate the neighbors
        vsets = [set((np.round(v[0],3),np.round(v[1],3)) for v in cell["vertices"]) for cell in cells]
emptyset = set([])
neighbors = {respair : 0 for respair in self.respairs}
npairs = {respair : 0 for respair in self.respairs}
for i, ivertices in enumerate(vsets):
counts = {respair : 0 for respair in self.respairs}
for j, jvertices in enumerate(vsets[i+1:],i+1):
if ivertices & jvertices != emptyset :
iresname = atoms[i].resname
jresname = atoms[j].resname
if iresname+"-"+jresname in neighbors:
counts[iresname+"-"+jresname] += 1
else:
counts[jresname+"-"+iresname] += 1
for respair in self.respairs:
if counts[respair] > 0 :
npairs[respair] += 1.0
neighbors[respair] += counts[respair]
neighout = np.asarray([neighbors[respair] / npairs[respair]
for respair in self.respairs])
return areaout, neighout
def finalize(self):
headers = ["Time"]
headers.extend(self.resnames)
self.records = self.aplrecords
self._write_records(postfix="_apl.txt", headers=headers)
headers = ["Time"]
headers.extend(self.respairs)
self.records = self.neighrecords
self._write_records(postfix="_neigh.txt", headers=headers)
class PrincipalAxisAnalysis(TrajectoryAction):
"""
    Class to analyse the principal axis and its angle
Attributes
----------
masses : list of float
the masses of the selected atoms
normal : numpy.ndarray
the normal to which the angle is calculate against
records : list of MDRecord
the recorded alpha (angle) values
selection : MDAnalysis.AtomGroup
the selection to make the analysis of
"""
@staticmethod
def descr() :
return "Analyze the principcal axis and its angle"
def add_arguments(self, parser):
        parser.add_argument('-m','--mask',help="the selection mask",default="name CA")
parser.add_argument('-n','--normal',type=float,nargs=3,help="the normal vector",default=[0.0,0.0,1.0])
parser.add_argument('-o','--out',help="the output filename",default="alpha.txt")
def setup(self,args):
self.selection = self.processor.universe.select_atoms(args.mask)
self.masses = np.asarray([atom.mass for atom in self.selection])
self.normal = np.asarray(args.normal)
self.records = []
self.out = args.out
def process(self):
#xyz = pbc.make_whole_xyz(self.selection.positions,
# self.processor.currbox)
#moi = geo.moment_of_inertia(xyz-xyz.mean(axis=0),self.masses)
#princip = geo.principal_axes(moi)
princip = self.selection.principal_axes(pbc=True)
#alpha = geo.angle(princip[0,:],self.normal)
alpha = mdmath.angle(princip[0,:], self.normal)
dalpha = pbc.unwrap_vector(alpha,np.pi)
alpha = | np.abs(alpha-dalpha) | numpy.abs |
"""
Implements custom ufunc dispatch mechanism for non-CPU devices.
"""
from __future__ import print_function, absolute_import
import operator
import warnings
from functools import reduce
import numpy as np
from numba.utils import longint, OrderedDict
from numba.utils import IS_PY3
from numba.npyufunc.ufuncbuilder import _BaseUFuncBuilder, parse_identity
from numba import sigutils, types
from numba.typing import signature
from numba.npyufunc.sigparse import parse_signature
if IS_PY3:
def _exec(codestr, glbls):
exec(codestr, glbls)
else:
eval(compile("""
def _exec(codestr, glbls):
exec codestr in glbls
""",
"<_exec>", "exec"))
def _broadcast_axis(a, b):
"""
Raises
------
ValueError if broadcast fails
"""
if a == b:
return a
elif a == 1:
return b
elif b == 1:
return a
else:
raise ValueError("failed to broadcast {0} and {1}".format(a, b))
def _pairwise_broadcast(shape1, shape2):
"""
Raises
------
ValueError if broadcast fails
"""
shape1, shape2 = map(tuple, [shape1, shape2])
while len(shape1) < len(shape2):
shape1 = (1,) + shape1
while len(shape1) > len(shape2):
shape2 = (1,) + shape2
return tuple(_broadcast_axis(a, b) for a, b in zip(shape1, shape2))
def _multi_broadcast(*shapelist):
"""
Raises
------
ValueError if broadcast fails
"""
assert shapelist
result = shapelist[0]
others = shapelist[1:]
try:
for i, each in enumerate(others, start=1):
result = _pairwise_broadcast(result, each)
except ValueError:
raise ValueError("failed to broadcast argument #{0}".format(i))
else:
return result
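# Illustrative sketch (not part of the original module): how the broadcast helpers above
# combine shapes, checked against numpy's own broadcasting rules.
def _example_broadcast_helpers():
    shape = _multi_broadcast((3, 1), (1, 4), (4,))
    check = np.broadcast(np.empty((3, 1)), np.empty((1, 4)), np.empty((4,))).shape
    assert shape == check == (3, 4)
    return shape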
class UFuncMechanism(object):
"""
Prepare ufunc arguments for vectorize.
"""
DEFAULT_STREAM = None
SUPPORT_DEVICE_SLICING = False
def __init__(self, typemap, args):
"""Never used directly by user. Invoke by UFuncMechanism.call().
"""
self.typemap = typemap
self.args = args
nargs = len(self.args)
self.argtypes = [None] * nargs
self.scalarpos = []
self.signature = None
self.arrays = [None] * nargs
def _fill_arrays(self):
"""
Get all arguments in array form
"""
for i, arg in enumerate(self.args):
if isinstance(arg, np.ndarray):
self.arrays[i] = arg
elif self.is_device_array(arg):
self.arrays[i] = arg
elif isinstance(arg, (int, longint, float, complex, np.number)):
# Is scalar
self.scalarpos.append(i)
else:
raise TypeError("argument #%d has invalid type" % (i + 1,))
def _fill_argtypes(self):
"""
Get dtypes
"""
for i, ary in enumerate(self.arrays):
if ary is not None:
self.argtypes[i] = ary.dtype
def _resolve_signature(self):
"""Resolve signature.
May have ambiguous case.
"""
matches = []
# Resolve scalar args exact match first
if self.scalarpos:
# Try resolve scalar arguments
for formaltys in self.typemap:
match_map = []
for i, (formal, actual) in enumerate(zip(formaltys,
self.argtypes)):
if actual is None:
actual = np.asarray(self.args[i]).dtype
match_map.append(actual == formal)
if all(match_map):
matches.append(formaltys)
# No matching with exact match; try coercing the scalar arguments
if not matches:
matches = []
for formaltys in self.typemap:
all_matches = all(actual is None or formal == actual
for formal, actual in
zip(formaltys, self.argtypes))
if all_matches:
matches.append(formaltys)
if not matches:
raise TypeError("No matching version. GPU ufunc requires array "
"arguments to have the exact types. This behaves "
"like regular ufunc with casting='no'.")
if len(matches) > 1:
raise TypeError("Failed to resolve ufunc due to ambiguous "
"signature. Too many untyped scalars. "
"Use numpy dtype object to type tag.")
# Try scalar arguments
self.argtypes = matches[0]
def _get_actual_args(self):
"""Return the actual arguments
Casts scalar arguments to numpy.array.
"""
for i in self.scalarpos:
self.arrays[i] = np.array([self.args[i]], dtype=self.argtypes[i])
return self.arrays
def _broadcast(self, arys):
"""Perform numpy ufunc broadcasting
"""
shapelist = [a.shape for a in arys]
shape = _multi_broadcast(*shapelist)
for i, ary in enumerate(arys):
if ary.shape == shape:
pass
else:
if self.is_device_array(ary):
arys[i] = self.broadcast_device(ary, shape)
else:
ax_differs = [ax for ax in range(len(shape))
if ax >= ary.ndim
or ary.shape[ax] != shape[ax]]
missingdim = len(shape) - len(ary.shape)
strides = [0] * missingdim + list(ary.strides)
for ax in ax_differs:
strides[ax] = 0
strided = np.lib.stride_tricks.as_strided(ary,
shape=shape,
strides=strides)
arys[i] = self.force_array_layout(strided)
return arys
def get_arguments(self):
"""Prepare and return the arguments for the ufunc.
Does not call to_device().
"""
self._fill_arrays()
self._fill_argtypes()
self._resolve_signature()
arys = self._get_actual_args()
return self._broadcast(arys)
def get_function(self):
"""Returns (result_dtype, function)
"""
return self.typemap[self.argtypes]
def is_device_array(self, obj):
"""Is the `obj` a device array?
Override in subclass
"""
return False
def broadcast_device(self, ary, shape):
"""Handles ondevice broadcasting
Override in subclass to add support.
"""
raise NotImplementedError("broadcasting on device is not supported")
def force_array_layout(self, ary):
"""Ensures array layout met device requirement.
Override in sublcass
"""
return ary
@classmethod
def call(cls, typemap, args, kws):
"""Perform the entire ufunc call mechanism.
"""
# Handle keywords
stream = kws.pop('stream', cls.DEFAULT_STREAM)
out = kws.pop('out', None)
if kws:
warnings.warn("unrecognized keywords: %s" % ', '.join(kws))
# Begin call resolution
cr = cls(typemap, args)
args = cr.get_arguments()
resty, func = cr.get_function()
outshape = args[0].shape
def attempt_ravel(a):
if cr.SUPPORT_DEVICE_SLICING:
raise NotImplementedError
try:
# Call the `.ravel()` method
return a.ravel()
except NotImplementedError:
# If it is not a device array
if not cr.is_device_array(a):
raise
# For device array, retry ravel on the host by first
# copying it back.
else:
hostary = cr.to_host(a, stream).ravel()
return cr.to_device(hostary, stream)
if args[0].ndim > 1:
args = [attempt_ravel(a) for a in args]
# Prepare argument on the device
devarys = []
any_device = False
for a in args:
if cr.is_device_array(a):
devarys.append(a)
any_device = True
else:
dev_a = cr.to_device(a, stream=stream)
devarys.append(dev_a)
# Launch
shape = args[0].shape
if out is None:
# No output is provided
devout = cr.device_array(shape, resty, stream=stream)
devarys.extend([devout])
cr.launch(func, shape[0], stream, devarys)
if any_device:
# If any of the arguments are on device,
# Keep output on the device
return devout.reshape(outshape)
else:
# Otherwise, transfer output back to host
return devout.copy_to_host().reshape(outshape)
elif cr.is_device_array(out):
# If output is provided and it is a device array,
# Return device array
if out.ndim > 1:
out = attempt_ravel(out)
devout = out
devarys.extend([devout])
cr.launch(func, shape[0], stream, devarys)
return devout.reshape(outshape)
else:
# If output is provided and it is a host array,
# Return host array
assert out.shape == shape
assert out.dtype == resty
devout = cr.device_array(shape, resty, stream=stream)
devarys.extend([devout])
cr.launch(func, shape[0], stream, devarys)
return devout.copy_to_host(out, stream=stream).reshape(outshape)
def to_device(self, hostary, stream):
"""Implement to device transfer
Override in subclass
"""
raise NotImplementedError
def to_host(self, devary, stream):
"""Implement to host transfer
Override in subclass
"""
raise NotImplementedError
def device_array(self, shape, dtype, stream):
"""Implements device allocation
Override in subclass
"""
raise NotImplementedError
def launch(self, func, count, stream, args):
"""Implements device function invocation
Override in subclass
"""
raise NotImplementedError
def to_dtype(ty):
return np.dtype(str(ty))
class DeviceVectorize(_BaseUFuncBuilder):
def __init__(self, func, identity=None, targetoptions={}):
assert not targetoptions
self.py_func = func
self.identity = parse_identity(identity)
# { arg_dtype: (return_dtype), cudakernel }
self.kernelmap = OrderedDict()
@property
def pyfunc(self):
return self.py_func
def add(self, sig=None, argtypes=None, restype=None):
# Handle argtypes
if argtypes is not None:
warnings.warn("Keyword argument argtypes is deprecated",
DeprecationWarning)
assert sig is None
if restype is None:
sig = tuple(argtypes)
else:
sig = restype(*argtypes)
del argtypes
del restype
# compile core as device function
args, return_type = sigutils.normalize_signature(sig)
devfnsig = signature(return_type, *args)
funcname = self.pyfunc.__name__
kernelsource = self._get_kernel_source(self._kernel_template,
devfnsig, funcname)
corefn, return_type = self._compile_core(devfnsig)
glbl = self._get_globals(corefn)
sig = signature(types.void, *([a[:] for a in args] + [return_type[:]]))
_exec(kernelsource, glbl)
stager = glbl['__vectorized_%s' % funcname]
kernel = self._compile_kernel(stager, sig)
argdtypes = tuple(to_dtype(t) for t in devfnsig.args)
resdtype = to_dtype(return_type)
self.kernelmap[tuple(argdtypes)] = resdtype, kernel
def build_ufunc(self):
raise NotImplementedError
def _get_kernel_source(self, template, sig, funcname):
args = ['a%d' % i for i in range(len(sig.args))]
fmts = dict(name=funcname,
args=', '.join(args),
argitems=', '.join('%s[__tid__]' % i for i in args))
return template.format(**fmts)
def _compile_core(self, sig):
raise NotImplementedError
def _get_globals(self, corefn):
raise NotImplementedError
def _compile_kernel(self, fnobj, sig):
raise NotImplementedError
class DeviceGUFuncVectorize(_BaseUFuncBuilder):
def __init__(self, func, sig, identity=None, targetoptions={}):
# Allow nopython flag to be set.
if not targetoptions.pop('nopython', True):
raise TypeError("nopython flag must be True")
# Are there any more target options?
if targetoptions:
opts = ', '.join([repr(k) for k in targetoptions.keys()])
fmt = "The following target options are not supported: {0}"
raise TypeError(fmt.format(opts))
self.py_func = func
self.identity = parse_identity(identity)
self.signature = sig
self.inputsig, self.outputsig = parse_signature(self.signature)
assert len(self.outputsig) == 1, "only support 1 output"
# { arg_dtype: (return_dtype), cudakernel }
self.kernelmap = OrderedDict()
@property
def pyfunc(self):
return self.py_func
def add(self, sig=None, argtypes=None, restype=None):
# Handle argtypes
if argtypes is not None:
warnings.warn("Keyword argument argtypes is deprecated",
DeprecationWarning)
assert sig is None
if restype is None:
sig = tuple(argtypes)
else:
sig = restype(*argtypes)
del argtypes
del restype
indims = [len(x) for x in self.inputsig]
outdims = [len(x) for x in self.outputsig]
funcname = self.py_func.__name__
src = expand_gufunc_template(self._kernel_template, indims,
outdims, funcname)
glbls = self._get_globals(sig)
_exec(src, glbls)
fnobj = glbls['__gufunc_{name}'.format(name=funcname)]
args, return_type = sigutils.normalize_signature(sig)
outertys = list(_determine_gufunc_outer_types(args, indims + outdims))
kernel = self._compile_kernel(fnobj, sig=tuple(outertys))
dtypes = tuple(np.dtype(str(t.dtype)) for t in outertys)
self.kernelmap[tuple(dtypes[:-1])] = dtypes[-1], kernel
def _compile_kernel(self, fnobj, sig):
raise NotImplementedError
def _get_globals(self, sig):
raise NotImplementedError
def _determine_gufunc_outer_types(argtys, dims):
for at, nd in zip(argtys, dims):
if isinstance(at, types.Array):
yield at.copy(ndim=nd + 1)
else:
if nd > 0:
raise ValueError("gufunc signature mismatch: ndim>0 for scalar")
yield types.Array(dtype=at, ndim=1, layout='A')
def expand_gufunc_template(template, indims, outdims, funcname):
"""Expand gufunc source template
"""
argdims = indims + outdims
argnames = ["arg{0}".format(i) for i in range(len(argdims))]
checkedarg = "min({0})".format(', '.join(["{0}.shape[0]".format(a)
for a in argnames]))
inputs = [_gen_src_for_indexing(aref, adims, _gen_src_for_input_indexing)
for aref, adims in zip(argnames, indims)]
outputs = [_gen_src_for_indexing(aref, adims, _gen_src_for_output_indexing)
for aref, adims in zip(argnames[len(indims):], outdims)]
argitems = inputs + outputs
src = template.format(name=funcname, args=', '.join(argnames),
checkedarg=checkedarg,
argitems=', '.join(argitems))
return src
def _gen_src_for_indexing(aref, adims, gen_sliced):
return "{aref}[{sliced}]".format(aref=aref, sliced=gen_sliced(adims))
def _gen_src_for_input_indexing(adims):
if adims > 0:
return _gen_src_for_array_indexing(adims)
else:
return '__tid__'
def _gen_src_for_output_indexing(adims):
if adims > 0:
return _gen_src_for_array_indexing(adims)
else:
return '__tid__:(__tid__ + 1)'
def _gen_src_for_array_indexing(adims):
return ','.join(['__tid__'] + [':'] * adims)
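# Index-expression examples, derived directly from the helpers above:
#   _gen_src_for_input_indexing(0)                                 -> '__tid__'
#   _gen_src_for_array_indexing(2)                                 -> '__tid__,:,:'
#   _gen_src_for_indexing('arg0', 2, _gen_src_for_input_indexing)  -> 'arg0[__tid__,:,:]'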
class GUFuncEngine(object):
'''Determine how to broadcast and execute a gufunc
based on the input shapes and signature
'''
@classmethod
def from_signature(cls, signature):
return cls(*parse_signature(signature))
def __init__(self, inputsig, outputsig):
# signatures
self.sin = inputsig
self.sout = outputsig
# argument count
self.nin = len(self.sin)
self.nout = len(self.sout)
def schedule(self, ishapes):
if len(ishapes) != self.nin:
raise TypeError('invalid number of input arguments')
# associate symbol values for input signature
symbolmap = {}
outer_shapes = []
inner_shapes = []
for argn, (shape, symbols) in enumerate(zip(ishapes, self.sin)):
argn += 1 # start from 1 for human
inner_ndim = len(symbols)
if len(shape) < inner_ndim:
fmt = "arg #%d: insufficient inner dimension"
raise ValueError(fmt % (argn,))
if inner_ndim:
inner_shape = shape[-inner_ndim:]
outer_shape = shape[:-inner_ndim]
else:
inner_shape = ()
outer_shape = shape
for axis, (dim, sym) in enumerate(zip(inner_shape, symbols)):
axis += len(outer_shape)
if sym in symbolmap:
if symbolmap[sym] != dim:
fmt = "arg #%d: shape[%d] mismatch argument"
raise ValueError(fmt % (argn, axis))
symbolmap[sym] = dim
outer_shapes.append(outer_shape)
inner_shapes.append(inner_shape)
# solve output shape
oshapes = []
for outsig in self.sout:
oshape = []
for sym in outsig:
oshape.append(symbolmap[sym])
oshapes.append(tuple(oshape))
# find the biggest outershape as looping dimension
sizes = [reduce(operator.mul, s, 1) for s in outer_shapes]
largest_i = np.argmax(sizes)
loopdims = outer_shapes[largest_i]
pinned = [False] * self.nin # same argument for each iteration
for i, d in enumerate(outer_shapes):
if d != loopdims:
if d == (1,) or d == ():
pinned[i] = True
else:
fmt = "arg #%d: outer dimension mismatch"
raise ValueError(fmt % (i + 1,))
return GUFuncSchedule(self, inner_shapes, oshapes, loopdims, pinned)
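# Worked example (assuming parse_signature follows the NumPy gufunc signature
# convention, e.g. "(m,n),(n)->(m)"):
#   engine = GUFuncEngine.from_signature("(m,n),(n)->(m)")
#   sched = engine.schedule([(10, 3, 4), (4,)])
# binds m=3 and n=4, pins the second argument (its outer shape is empty), and
# yields loopdims=(10,), oshapes=[(3,)] and output_shapes=[(10, 3)].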
class GUFuncSchedule(object):
def __init__(self, parent, ishapes, oshapes, loopdims, pinned):
self.parent = parent
# core shapes
self.ishapes = ishapes
self.oshapes = oshapes
# looping dimension
self.loopdims = loopdims
self.loopn = reduce(operator.mul, loopdims, 1)
# flags
self.pinned = pinned
self.output_shapes = [loopdims + s for s in oshapes]
def __str__(self):
import pprint
attrs = 'ishapes', 'oshapes', 'loopdims', 'loopn', 'pinned'
values = [(k, getattr(self, k)) for k in attrs]
return pprint.pformat(dict(values))
class GenerializedUFunc(object):
def __init__(self, kernelmap, engine):
self.kernelmap = kernelmap
self.engine = engine
self.max_blocksize = 2 ** 30
assert self.engine.nout == 1, "only support single output"
def __call__(self, *args, **kws):
callsteps = self._call_steps(args, kws)
callsteps.prepare_inputs()
indtypes, schedule, outdtype, kernel = self._schedule(
callsteps.norm_inputs, callsteps.output)
callsteps.adjust_input_types(indtypes)
callsteps.allocate_outputs(schedule, outdtype)
callsteps.prepare_kernel_parameters()
newparams, newretval = self._broadcast(schedule,
callsteps.kernel_parameters,
callsteps.kernel_returnvalue)
callsteps.launch_kernel(kernel, schedule.loopn, newparams + [newretval])
return callsteps.post_process_result()
def _schedule(self, inputs, out):
input_shapes = [a.shape for a in inputs]
schedule = self.engine.schedule(input_shapes)
# find kernel
idtypes = tuple(i.dtype for i in inputs)
try:
outdtype, kernel = self.kernelmap[idtypes]
except KeyError:
# No exact match, so use the first compatible signature.
# This does not match the numpy dispatching exactly.
# Later, we may just jit a new version for the missing signature.
idtypes = self._search_matching_signature(idtypes)
# Select kernel
outdtype, kernel = self.kernelmap[idtypes]
# check output
if out is not None and schedule.output_shapes[0] != out.shape:
raise ValueError('output shape mismatch')
return idtypes, schedule, outdtype, kernel
def _search_matching_signature(self, idtypes):
"""
Given the input types in `idtypes`, return a compatible sequence of
types that is defined in `kernelmap`.
Note: Ordering is guaranteed by `kernelmap` being an OrderedDict
"""
for sig in self.kernelmap.keys():
if all(np.can_cast(actual, desired)
for actual, desired in zip(sig, idtypes)):
return sig
else:
raise TypeError("no matching signature")
def _broadcast(self, schedule, params, retval):
assert schedule.loopn > 0, "zero looping dimension"
odim = 1 if not schedule.loopdims else schedule.loopn
newparams = []
for p, cs in zip(params, schedule.ishapes):
if not cs and p.size == 1:
# Broadcast scalar input
devary = self._broadcast_scalar_input(p, odim)
newparams.append(devary)
else:
# Broadcast vector input
newparams.append(self._broadcast_array(p, odim, cs))
newretval = retval.reshape(odim, *schedule.oshapes[0])
return newparams, newretval
def _broadcast_array(self, ary, newdim, innerdim):
newshape = (newdim,) + innerdim
# No change in shape
if ary.shape == newshape:
return ary
# Creating new dimension
elif len(ary.shape) < len(newshape):
assert newshape[-len(ary.shape):] == ary.shape, \
"cannot add dim and reshape at the same time"
return self._broadcast_add_axis(ary, newshape)
# Collapsing dimension
else:
return ary.reshape(*newshape)
def _broadcast_add_axis(self, ary, newshape):
raise NotImplementedError("cannot add new axis")
def _broadcast_scalar_input(self, ary, shape):
raise NotImplementedError
class GUFuncCallSteps(object):
__slots__ = [
'args',
'kwargs',
'output',
'norm_inputs',
'kernel_returnvalue',
'kernel_parameters',
'_is_device_array',
'_need_device_conversion',
]
def __init__(self, args, kwargs):
self.args = args
self.kwargs = kwargs
self.output = self.kwargs.get('out')
self._is_device_array = [self.is_device_array(a) for a in self.args]
self._need_device_conversion = not any(self._is_device_array)
# Normalize inputs
inputs = []
for a, isdev in zip(self.args, self._is_device_array):
if isdev:
inputs.append(a)
else:
inputs.append( | np.array(a) | numpy.array |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import pybullet as p
import pybullet_data
import time
import math
from sklearn.preprocessing import normalize
## Hyper Params
MAX_EPISODE_LEN = 150 # Number of steps for one training episode
REWARD_FACTOR = 10
BOUND_ANGLE = 45
STEP_ANGLE = 15 # Maximum angle delta per step
JOINT_LENGTH = 0.05
def leg_IK(angle, length, offset, sign=1):
"""
Returns the angle of each joint in the leg needed to match the desired swing angle and leg extension (length).
"""
length = max(length, JOINT_LENGTH * 0.2)
# Inner angle alpha
cosAngle0 = (length**2) / (2 * JOINT_LENGTH * length)
alpha = np.arccos(cosAngle0) * sign + angle
#if alpha < 0:
# sign = -sign
# Inner angle beta
cosAngle1 = (-(length**2) + JOINT_LENGTH**2 + JOINT_LENGTH**2) / (2 * JOINT_LENGTH * JOINT_LENGTH)
beta = -sign * (np.pi - np.arccos(cosAngle1)) + offset
if math.isnan(alpha):
print("alpha is nan")
alpha = 0
if math.isnan(beta):
print("beta is nan")
beta = 0
return alpha, beta
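# A minimal usage sketch (hypothetical values): swing the leg forward by 10 degrees
# at an extension of 0.08 m; `offset` and `sign` depend on how the servo is mounted.
#   alpha, beta = leg_IK(np.deg2rad(10), 0.08, offset=0.0, sign=1)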
def joint_extension(x):
return 2 - ((x - 1) / 2)**2
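# joint_extension maps an action component in [-1, 1] to an extension factor in [1, 2]:
# joint_extension(-1) = 1, joint_extension(0) = 1.75, joint_extension(1) = 2.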
class OpenCatGymEnv(gym.Env):
""" Gym environment (stable baselines 3) for OpenCat robots.
"""
metadata = {'render.modes': ['human']}
def __init__(self, render=False):
self.step_counter = 0
# Store robot state and joint history
self.state_robot_history = np.array([])
self.jointAngles_history = np.array([])
# Max joint angles
self.boundAngles = np.deg2rad(BOUND_ANGLE)
# Create the simulation, p.GUI for GUI, p.DIRECT for only training
# Use options="--opengl2" if the default renderer fails to start.
if render:
p.connect(p.GUI)#, options="--opengl2") #, options="--width=960 --height=540 --mp4=\"training.mp4\" --mp4fps=60") # uncomment to create a video
else:
p.connect(p.DIRECT)
p.setPhysicsEngineParameter(fixedTimeStep=1.0/60)
# Stop rendering
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
# Move camera
p.resetDebugVisualizerCamera(cameraDistance=0.5,
cameraYaw=-10,
cameraPitch=-40,
cameraTargetPosition=[0.4,0,0])
# The action space contains the 11 joint angles
self.action_space = spaces.Box(np.array([-1]*11), np.array([1]*11))
# The observation space contains the torso roll and pitch, the joint angles, and a history of the last 20 joint angles
# 11 * 20 + 6 = 226
self.observation_space = spaces.Box(np.array([-1]*226), np.array([1]*226))
def get_desired_joint_angles(self, jointAngles, action):
""""Adds the action vector to the revolute joints. Joint angles are
clipped. `jointAngles` is changed to have the updated angles of the
entire robot. The vector returned contains the only the revolute joints
of the robot."""
# Below is the mapping from the old 3D model to the new 3D model.
# -----------------------------------------------------------
# | PREVIOUS | NEW | NEW INDEX |
# |=========================================================|
# | hip_right | R_Rear_Hip_Servo_Thigh | i = 1 |
# | knee_right | R_Rear_Knee_Servo | i = 3 |
# |---------------------------------------------------------|
# | shoulder_right | R_Front_Hip_Servo_Thigh | i = 7 |
# | elbow_right | R_Front_Knee_Servo | i = 9 |
# |---------------------------------------------------------|
# | ############# | Body_Neck_Servo | i = 13 |
# | ############# | Neck_Head_Servo | i = 14 |
# | --------------------------------------------------------|
# | ############# | Tail_Servo_Tail | i = 19 |
# |---------------------------------------------------------|
# | hip_left | L_Rear_Hip_Servo_Thigh | i = 21 |
# | knee_left | L_Rear_Knee_Servo | i = 23 |
# |---------------------------------------------------------|
# | shoulder_left | L_Front_Hip_Servo_Thigh | i = 27 |
# | elbow_left | L_Front_Knee_Servo | i = 29 |
# -----------------------------------------------------------
desiredJointAngles = jointAngles
ds = np.deg2rad(STEP_ANGLE) # Maximum joint angle derivative (maximum change per step), should be implemented in setJointMotorControlArray
# Use IK to compute the new angles of each joint
desired_left_front_angle = np.deg2rad(BOUND_ANGLE * action[0])
desired_left_front_length = JOINT_LENGTH * joint_extension(action[1])
desired_right_front_angle = np.deg2rad(BOUND_ANGLE * action[2])
desired_right_front_length = JOINT_LENGTH * joint_extension(action[3])
desired_left_rear_angle = np.deg2rad(BOUND_ANGLE * action[4])
desired_left_rear_length = JOINT_LENGTH * joint_extension(action[5])
desired_right_rear_angle = | np.deg2rad(BOUND_ANGLE * action[6]) | numpy.deg2rad |
# Based on https://github.com/kuangliu/pytorch-cifar
import torch.optim as optim
import torch.backends.cudnn as cudnn
import os
from models import *
from utils import (train_for_an_epoch, test_after_epoch, prepare_dataloaders, random_index_generation,
loss_and_probs_over_unlabeled, label_additional_data)
import copy
import torch
import random
import matplotlib.pyplot as plt
import time
import numpy as np
torch.manual_seed(119)
random.seed(119)
| np.random.seed(119) | numpy.random.seed |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# TODO: import only necessary tensorflow functions
import tensorflow as tf
import tensorflow_datasets as tfds
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay,\
roc_curve, roc_auc_score, classification_report, accuracy_score, precision_score, recall_score
# TODO: Add docstrings
# Loads the Patch Camelyon dataset
def load_pcam(data_dir=None):
pcam, pcam_info = tfds.load("patch_camelyon", with_info=True, data_dir=data_dir)
print(pcam_info)
return pcam, pcam_info
# Converts images to prepare them for modelling
def convert_sample(sample):
# Credit: <NAME>
image, label = sample['image'], sample['label']
image = tf.image.convert_image_dtype(image, tf.float32)
label = tf.one_hot(label, 2, dtype=tf.float32)
return image, label
# Alternative to convert_sample which also converts images to grayscale
def convert_sample_grayscale(sample):
image, label = sample['image'], sample['label']
image = tf.image.rgb_to_grayscale(image, name=None)
image = tf.image.convert_image_dtype(image, tf.float32)
label = tf.one_hot(label, 2, dtype=tf.float32)
return image, label
# Substitute for ImageDataGenerator which gets along with the TensorFlow Dataset object
def build_pipelines(pcam, grayscale=False):
# Uses the grayscale version of convert_sample
if grayscale:
train_pipeline = pcam['train'].map(convert_sample_grayscale, num_parallel_calls=8).shuffle(1024).repeat().batch(64).prefetch(2)
valid_pipeline = pcam['validation'].map(convert_sample_grayscale, num_parallel_calls=8).repeat().batch(128).prefetch(2)
test_pipeline = pcam['test'].map(convert_sample_grayscale, num_parallel_calls=8).batch(128).prefetch(2)
# Uses the normal version of convert_sample
else:
# Credit: <NAME>
train_pipeline = pcam['train'].map(convert_sample, num_parallel_calls=8).shuffle(1024).repeat().batch(64).prefetch(2)
valid_pipeline = pcam['validation'].map(convert_sample, num_parallel_calls=8).repeat().batch(128).prefetch(2)
test_pipeline = pcam['test'].map(convert_sample, num_parallel_calls=8).batch(128).prefetch(2)
return train_pipeline, valid_pipeline, test_pipeline
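# A minimal usage sketch (model definition not shown; the step counts follow from the
# 262144 training and 32768 validation images and the batch sizes used above):
#   pcam, pcam_info = load_pcam()
#   train, valid, test = build_pipelines(pcam)
#   model.fit(train, steps_per_epoch=262144 // 64, epochs=10,
#             validation_data=valid, validation_steps=32768 // 128)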
# Export the training history to a .csv file
def save_history(hist_df, filepath):
# Sample filepath: 'data/models/history/cnn1_history.csv'
hist_csv_file = filepath
with open(hist_csv_file, mode='w') as f:
hist_df.to_csv(f)
# Loads model training history .csv into a pandas dataframe
def load_history(filepath):
# Sample filepath: 'data/models/history/cnn1_history.csv'
hist_df = pd.read_csv(filepath, index_col=0)
return hist_df
# Plot the training accuracy and loss from training history
def plot_history(hist_df, figsize=(10,4), title=None, save=False, filepath=None):
# Create subplots
plt.subplots(1, 2, figsize=figsize)
# Creates a title for the whole plot
plt.suptitle(title, fontsize=24)
# Plot accuracies for train and validation sets
plt.subplot(1, 2, 1)
plt.plot(hist_df['accuracy'], label='Train', marker='o')
plt.plot(hist_df['val_accuracy'], label='Validation', marker='o')
plt.title('Training and Validation Accuracy', size=20)
plt.xlabel('Epoch', size=16)
plt.ylabel('Accuracy', size=16)
plt.legend()
# Plot losses
plt.subplot(1, 2, 2)
plt.plot(hist_df['loss'], label='Train', marker='o')
plt.plot(hist_df['val_loss'], label='Validation', marker='o')
plt.title('Training and Validation Loss', size=20)
plt.xlabel('Epoch', size=16)
plt.ylabel('Loss', size=16)
plt.legend()
# This ensures the subplots do not overlap
plt.tight_layout()
if save:
# Sample filepath: 'data/plots/cnn1_acc_loss_plot.png'
plt.savefig(filepath)
# Show the subplots
plt.show()
# Plot the confusion matrix for a model
def plot_cf_matrix(y_true, y_pred, normalize=True, save=False, filepath=None):
cf_matrix = confusion_matrix(y_true, y_pred)
# Turns the values in the confusion matrix into percentages
if normalize:
cf_matrix = cf_matrix / cf_matrix.sum(axis=1)
ConfusionMatrixDisplay(cf_matrix, display_labels=['Healthy (0)', 'Cancer (1)']).plot()
if save:
# Sample filepath: 'data/plots/cnn1_cf_matrix.png'
plt.savefig(filepath)
plt.show()
# Plot the ROC curve and calculate AUC
def plot_roc_curve(y_true, y_proba, save=False, filepath=None):
if y_proba.shape[1] == 2:
# y_proba is still one-hot encoded, so grab only the class 1 probabilities
y_proba = np.array([i[1] for i in y_proba])
fprs, tprs, thresholds = roc_curve(y_true, y_proba)
roc_auc = roc_auc_score(y_true, y_proba)
plt.figure(figsize=(8, 6))
plt.plot(fprs, tprs, color='darkorange',
lw=2, label='AUC = %0.2f' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlabel('False Positive Rate (FPR)', size=16)
plt.ylabel('True Positive Rate (TPR)', size=16)
plt.title('ROC Curve for Cancer Detection', size=20)
plt.legend(loc="best")
if save:
# Sample filepath: 'data/plots/cnn1_roc.png'
plt.savefig(filepath)
plt.show()
print(f'Area under curve (AUC):{roc_auc}')
# Create a list of ground truth labels from a specified data split
def generate_y_true(pcam, split='test'):
# Initialize iterator so it starts from the beginning
iterator = pcam[split].__iter__()
# Create an empty list to store the labels
y_true = []
if split == 'train':
# There are 262144 images in the training set
for i in range(262144):
y_true.append(int(iterator.get_next()['label']))
else:
# There are 32768 images in the validation and test sets
for i in range(32768):
y_true.append(int(iterator.get_next()['label']))
return np.array(y_true)
# Get predictions as probabilities from a trained model
def generate_y_proba(model, test_pipeline, class_1=False, save=False, filepath=None):
y_proba = model.predict(test_pipeline)
if class_1:
# Return just the class_1 predictions rather than one-hot encoded predictions
y_proba = | np.array([i[1] for i in y_proba]) | numpy.array |
import numpy as np
import scipy
def SEIR(x, M_g, M_f, pop, ts, pop0, sd=[]):
# The adaptive metapopulation SEIR model
dt = 1.
tmstep = 1
#integrate forward for one day
num_loc = pop.shape[0]
(_, num_ens) = x.shape
#S,E,Id,Iu,obs,beta,mu,theta_g,theta_f,Z,alpha,D
Sidx = np.arange(1, 5*num_loc, 5).T
Eidx = np.arange(2, 5*num_loc, 5).T
Ididx = np.arange(3, 5*num_loc, 5).T
Iuidx = np.arange(4, 5*num_loc, 5).T
obsidx = np.arange(5, 5*num_loc+5, 5).T
betaidx = 5*num_loc+1
muidx = 5*num_loc+2
thetagidx = 5*num_loc+3
thetafidx = 5*num_loc+4
Zidx = 5*num_loc+5
gammaidx = 5*num_loc+6
Didx = 5*num_loc+7
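# Layout of the state/parameter vector x for each ensemble member: every location
# occupies 5 consecutive slots (S, E, Id, Iu, daily observed incidence), followed by
# 7 global parameters (beta, mu, theta_g, theta_f, Z, alpha -- indexed via gammaidx
# here -- and D). The index arrays above appear to be 1-based (MATLAB-style).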
S = np.zeros((num_loc, num_ens, tmstep+1))
E = np.zeros((num_loc, num_ens, tmstep+1))
Id = np.zeros((num_loc, num_ens, tmstep+1))
Iu = np.zeros((num_loc, num_ens, tmstep+1))
Incidence = np.zeros((num_loc, num_ens, tmstep+1))
Incidence_u = | np.zeros((num_loc, num_ens, tmstep+1)) | numpy.zeros |
# 2D dataset loaders
import data.data_hcp as data_hcp
import data.data_abide as data_abide
import data.data_nci as data_nci
import data.data_promise as data_promise
import data.data_pirad_erc as data_pirad_erc
import data.data_mnms as data_mnms
import data.data_wmh as data_wmh
import data.data_scgm as data_scgm
# other imports
import logging
import config.system_paths as sys_config
import numpy as np
# ==================================================================
# TRAINING DATA LOADER
# ==================================================================
def load_test_data(dataset,
image_size,
target_resolution,
cv_fold_num = 1):
# ================================================================
# NCI
# ================================================================
if dataset in ['RUNMC', 'BMC']:
logging.info('Reading NCI - ' + dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_nci)
data_pros = data_nci.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_nci,
preprocessing_folder = sys_config.preproc_folder_nci,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = dataset,
cv_fold_num = cv_fold_num)
imtr = data_pros['images_train']
gttr = data_pros['labels_train']
orig_data_res_x = data_pros['px_train'][:]
orig_data_res_y = data_pros['py_train'][:]
orig_data_res_z = data_pros['pz_train'][:]
orig_data_siz_x = data_pros['nx_train'][:]
orig_data_siz_y = data_pros['ny_train'][:]
orig_data_siz_z = data_pros['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_pros['images_validation']
gtvl = data_pros['labels_validation']
orig_data_siz_z_val = data_pros['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['UCL', 'BIDMC', 'HK']:
logging.info('Reading ' + dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_promise)
data_pros = data_promise.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_promise,
preprocessing_folder = sys_config.preproc_folder_promise,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = dataset,
cv_fold_num = cv_fold_num)
imtr = data_pros['images_train']
gttr = data_pros['labels_train']
orig_data_res_x = data_pros['px_train'][:]
orig_data_res_y = data_pros['py_train'][:]
orig_data_res_z = data_pros['pz_train'][:]
orig_data_siz_x = data_pros['nx_train'][:]
orig_data_siz_y = data_pros['ny_train'][:]
orig_data_siz_z = data_pros['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_pros['images_validation']
gtvl = data_pros['labels_validation']
orig_data_siz_z_val = data_pros['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['USZ']:
logging.info('Reading PIRAD_ERC images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_pirad_erc)
data_pros_train = data_pirad_erc.load_data(input_folder = sys_config.orig_data_root_pirad_erc,
preproc_folder = sys_config.preproc_folder_pirad_erc,
idx_start = 40,
idx_end = 68,
size = image_size,
target_resolution = target_resolution,
labeller = 'ek',
force_overwrite = False)
imtr = data_pros_train['images']
gttr = data_pros_train['labels']
orig_data_res_x = data_pros_train['px'][:]
orig_data_res_y = data_pros_train['py'][:]
orig_data_res_z = data_pros_train['pz'][:]
orig_data_siz_x = data_pros_train['nx'][:]
orig_data_siz_y = data_pros_train['ny'][:]
orig_data_siz_z = data_pros_train['nz'][:]
num_train_subjects = orig_data_siz_z.shape[0]
data_pros_val = data_pirad_erc.load_data(input_folder = sys_config.orig_data_root_pirad_erc,
preproc_folder = sys_config.preproc_folder_pirad_erc,
idx_start = 20,
idx_end = 40,
size = image_size,
target_resolution = target_resolution,
labeller = 'ek',
force_overwrite = False)
imvl = data_pros_val['images']
gtvl = data_pros_val['labels']
orig_data_siz_z_val = data_pros_val['nz'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
# ================================================================
# CARDIAC (MNMS)
# ================================================================
elif dataset in ['HVHD', 'CSF', 'UHE']:
logging.info('Reading MNMS - ' + dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_mnms)
data_cardiac = data_mnms.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_mnms,
preprocessing_folder = sys_config.preproc_folder_mnms,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = dataset)
imtr = data_cardiac['images_train']
gttr = data_cardiac['labels_train']
orig_data_res_x = data_cardiac['px_train'][:]
orig_data_res_y = data_cardiac['py_train'][:]
orig_data_res_z = data_cardiac['pz_train'][:]
orig_data_siz_x = data_cardiac['nx_train'][:]
orig_data_siz_y = data_cardiac['ny_train'][:]
orig_data_siz_z = data_cardiac['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_cardiac['images_validation']
gtvl = data_cardiac['labels_validation']
orig_data_siz_z_val = data_cardiac['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
# ================================================================
# Brain lesions (WMH)
# ================================================================
elif dataset in ['UMC', 'NUHS']:
data_brain_lesions = data_wmh.load_and_maybe_process_data(sys_config.orig_data_root_wmh,
sys_config.preproc_folder_wmh,
image_size,
target_resolution,
force_overwrite=False,
sub_dataset = dataset,
cv_fold_number = cv_fold_num,
protocol = 'FLAIR')
imtr = data_brain_lesions['images_train']
gttr = data_brain_lesions['labels_train']
orig_data_res_x = data_brain_lesions['px_train'][:]
orig_data_res_y = data_brain_lesions['py_train'][:]
orig_data_res_z = data_brain_lesions['pz_train'][:]
orig_data_siz_x = data_brain_lesions['nx_train'][:]
orig_data_siz_y = data_brain_lesions['ny_train'][:]
orig_data_siz_z = data_brain_lesions['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_brain_lesions['images_validation']
gtvl = data_brain_lesions['labels_validation']
orig_data_siz_z_val = data_brain_lesions['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['site1', 'site2', 'site3', 'site4']:
data_gm = data_scgm.load_and_maybe_process_data(sys_config.orig_data_root_scgm,
sys_config.preproc_folder_scgm,
image_size,
target_resolution,
force_overwrite=False,
sub_dataset = dataset,
cv_fold_number = cv_fold_num)
imtr = data_gm['images_train']
gttr = data_gm['labels_train']
orig_data_res_x = data_gm['px_train'][:]
orig_data_res_y = data_gm['py_train'][:]
orig_data_res_z = data_gm['pz_train'][:]
orig_data_siz_x = data_gm['nx_train'][:]
orig_data_siz_y = data_gm['ny_train'][:]
orig_data_siz_z = data_gm['nz_train'][:]
num_train_subjects = orig_data_siz_z.shape[0]
imvl = data_gm['images_validation']
gtvl = data_gm['labels_validation']
orig_data_siz_z_val = data_gm['nz_validation'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
# ================================================================
# HCP T1 / T2
# ================================================================
elif dataset in ['HCPT1', 'HCPT2']:
logging.info('Reading ' + str(dataset) + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_hcp)
data_brain_train = data_hcp.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_hcp,
preprocessing_folder = sys_config.preproc_folder_hcp,
idx_start = 0,
idx_end = 20,
protocol = dataset[-2:],
size = image_size,
depth = 256,
target_resolution = target_resolution)
imtr = data_brain_train['images']
gttr = data_brain_train['labels']
orig_data_res_x = data_brain_train['px'][:]
orig_data_res_y = data_brain_train['py'][:]
orig_data_res_z = data_brain_train['pz'][:]
orig_data_siz_x = data_brain_train['nx'][:]
orig_data_siz_y = data_brain_train['ny'][:]
orig_data_siz_z = data_brain_train['nz'][:]
num_train_subjects = orig_data_siz_z.shape[0]
data_brain_val = data_hcp.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_hcp,
preprocessing_folder = sys_config.preproc_folder_hcp,
idx_start = 20,
idx_end = 25,
protocol = dataset[-2:],
size = image_size,
depth = 256,
target_resolution = target_resolution)
imvl = data_brain_val['images']
gtvl = data_brain_val['labels']
orig_data_siz_z_val = data_brain_val['nz'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
elif dataset in ['CALTECH']:
logging.info('Reading CALTECH images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_abide + 'CALTECH/')
data_brain_train = data_abide.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_abide,
preprocessing_folder = sys_config.preproc_folder_abide,
site_name = 'CALTECH',
idx_start = 0,
idx_end = 10,
protocol = 'T1',
size = image_size,
depth = 256,
target_resolution = target_resolution)
imtr = data_brain_train['images']
gttr = data_brain_train['labels']
orig_data_res_x = data_brain_train['px'][:]
orig_data_res_y = data_brain_train['py'][:]
orig_data_res_z = data_brain_train['pz'][:]
orig_data_siz_x = data_brain_train['nx'][:]
orig_data_siz_y = data_brain_train['ny'][:]
orig_data_siz_z = data_brain_train['nz'][:]
num_train_subjects = orig_data_siz_z.shape[0]
data_brain_val = data_abide.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_abide,
preprocessing_folder = sys_config.preproc_folder_abide,
site_name = 'CALTECH',
idx_start = 10,
idx_end = 15,
protocol = 'T1',
size = image_size,
depth = 256,
target_resolution = target_resolution)
imvl = data_brain_val['images']
gtvl = data_brain_val['labels']
orig_data_siz_z_val = data_brain_val['nz'][:]
num_val_subjects = orig_data_siz_z_val.shape[0]
return (imtr, # 0
gttr, # 1
orig_data_res_x, # 2
orig_data_res_y, # 3
orig_data_res_z, # 4
orig_data_siz_x, # 5
orig_data_siz_y, # 6
orig_data_siz_z, # 7
num_train_subjects, # 8
imvl, # 9
gtvl, # 10
orig_data_siz_z_val, # 11
num_val_subjects) # 12
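# A minimal usage sketch (image_size and target_resolution values are illustrative):
#   loaded = load_test_data('RUNMC', image_size=(256, 256), target_resolution=(0.625, 0.625))
#   imtr, gttr = loaded[0], loaded[1]    # training images and labels
#   imvl, gtvl = loaded[9], loaded[10]   # validation images and labels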
# ==================================================================
# TEST DATA LOADER
# ==================================================================
def load_testing_data(test_dataset,
cv_fold_num,
image_size,
target_resolution,
image_depth):
# ================================================================
# PROMISE
# ================================================================
if test_dataset in ['UCL', 'BIDMC', 'HK']:
data_pros = data_promise.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_promise,
preprocessing_folder = sys_config.preproc_folder_promise,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = test_dataset,
cv_fold_num = cv_fold_num)
imts = data_pros['images_test']
gtts = data_pros['labels_test']
orig_data_res_x = data_pros['px_test'][:]
orig_data_res_y = data_pros['py_test'][:]
orig_data_res_z = data_pros['pz_test'][:]
orig_data_siz_x = data_pros['nx_test'][:]
orig_data_siz_y = data_pros['ny_test'][:]
orig_data_siz_z = data_pros['nz_test'][:]
name_test_subjects = data_pros['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# USZ
# ================================================================
elif test_dataset == 'USZ':
image_depth = 32
z_resolution = 2.5
idx_start = 0
idx_end = 20
data_pros = data_pirad_erc.load_data(input_folder = sys_config.orig_data_root_pirad_erc,
preproc_folder = sys_config.preproc_folder_pirad_erc,
idx_start = idx_start,
idx_end = idx_end,
size = image_size,
target_resolution = target_resolution,
labeller = 'ek')
imts = data_pros['images']
gtts = data_pros['labels']
orig_data_res_x = data_pros['px'][:]
orig_data_res_y = data_pros['py'][:]
orig_data_res_z = data_pros['pz'][:]
orig_data_siz_x = data_pros['nx'][:]
orig_data_siz_y = data_pros['ny'][:]
orig_data_siz_z = data_pros['nz'][:]
name_test_subjects = data_pros['patnames']
num_test_subjects = 10 # orig_data_siz_z.shape[0]
ids = np.arange(idx_start, idx_end)
# ================================================================
# NCI
# ================================================================
elif test_dataset in ['BMC', 'RUNMC']:
logging.info('Reading ' + test_dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_nci)
data_pros = data_nci.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_nci,
preprocessing_folder = sys_config.preproc_folder_nci,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = test_dataset,
cv_fold_num = cv_fold_num)
imts = data_pros['images_test']
gtts = data_pros['labels_test']
orig_data_res_x = data_pros['px_test'][:]
orig_data_res_y = data_pros['py_test'][:]
orig_data_res_z = data_pros['pz_test'][:]
orig_data_siz_x = data_pros['nx_test'][:]
orig_data_siz_y = data_pros['ny_test'][:]
orig_data_siz_z = data_pros['nz_test'][:]
name_test_subjects = data_pros['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# CARDIAC (MNMS)
# ================================================================
elif test_dataset == 'HVHD' or test_dataset == 'CSF' or test_dataset == 'UHE':
logging.info('Reading MNMS - ' + test_dataset + ' images...')
logging.info('Data root directory: ' + sys_config.orig_data_root_mnms)
data_cardiac = data_mnms.load_and_maybe_process_data(input_folder = sys_config.orig_data_root_mnms,
preprocessing_folder = sys_config.preproc_folder_mnms,
size = image_size,
target_resolution = target_resolution,
force_overwrite = False,
sub_dataset = test_dataset)
imts = data_cardiac['images_test']
gtts = data_cardiac['labels_test']
orig_data_res_x = data_cardiac['px_test'][:]
orig_data_res_y = data_cardiac['py_test'][:]
orig_data_res_z = data_cardiac['pz_test'][:]
orig_data_siz_x = data_cardiac['nx_test'][:]
orig_data_siz_y = data_cardiac['ny_test'][:]
orig_data_siz_z = data_cardiac['nz_test'][:]
name_test_subjects = data_cardiac['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = np.arange(num_test_subjects)
# ================================================================
# Brain lesions (WMH)
# ================================================================
elif test_dataset == 'UMC' or test_dataset == 'NUHS':
data_brain_lesions = data_wmh.load_and_maybe_process_data(sys_config.orig_data_root_wmh,
sys_config.preproc_folder_wmh,
image_size,
target_resolution,
force_overwrite=False,
sub_dataset = test_dataset,
cv_fold_number = cv_fold_num,
protocol = 'FLAIR')
imts = data_brain_lesions['images_test']
gtts = data_brain_lesions['labels_test']
orig_data_res_x = data_brain_lesions['px_test'][:]
orig_data_res_y = data_brain_lesions['py_test'][:]
orig_data_res_z = data_brain_lesions['pz_test'][:]
orig_data_siz_x = data_brain_lesions['nx_test'][:]
orig_data_siz_y = data_brain_lesions['ny_test'][:]
orig_data_siz_z = data_brain_lesions['nz_test'][:]
name_test_subjects = data_brain_lesions['patnames_test']
num_test_subjects = orig_data_siz_z.shape[0]
ids = | np.arange(num_test_subjects) | numpy.arange |
import os
import copy
import zeus
import emcee
import snowline
import contextlib
import numpy as np
from scipy.special import loggamma
from scipy.optimize import differential_evolution
class LinFit(object):
"""The LinFit class.
Implements methods to fit straight lines or planes, including taking data and a covariance matrix and fitting
either to just find the best fit, or run an MCMC. Has four main attributes, that are useful for
accessing other information after running 'optimize', 'emcee' or 'zeus'.
Attributes
----------
coords: ndarray
N dimensional array holding the best-fitting parameters in the data coordinates
after a call to 'optimize' or one of the MCMC routines. Otherwise zeros.
normal: ndarray
N dimensional array holding the best-fitting parameters in the normal unit vectors
after a call to 'optimize' or one of the MCMC routines. Otherwise zeros.
vert_scat: float
Holds the best-fitting scatter in the vertical axis of the data coordinates, after a call to 'optimize'
or one of the MCMC routines. Otherwise zero.
norm_scat: float
Holds the best-fitting scatter normal to the plane, after a call to 'optimize'
or one of the MCMC routines. Otherwise zero.
normal_bounds: sequence
Holds the prior bounds in the normal unit vectors, after bounds in the data coordinates have
been passed to a call to 'optimize' or one of the MCMC routines. Otherwise None.
Args
----
data: ndarray
The N x D dimensional data vector
cov: ndarray
The N x N x D dimensional set of covariance matrices.
weights: ndarray, optional
D dimensional array of weights for the data. Default is None, in which case unit weights are assumed
for each data point.
vertaxis: float, optional
Specifies which of the coordinate axes is to be treated as the 'vertical' axis (i.e., 'y' for 2D data).
Default is -1, in which case the last axis will be treated as vertical.
"""
# Initialise the necessary parameters and perform checks
def __init__(self, data, cov, weights=None, vertaxis=-1):
self.ndims = np.shape(data)[0]
self.ndata = np.shape(data)[1]
self.data = data
self.cov = cov
self.weights = np.ones(self.ndata) if weights is None else weights
self.vertaxis = vertaxis
# Some variables to store the two sets of fitting coordinates and scatter parameters
self.coords = np.zeros(self.ndims)
self.normal = np.zeros(self.ndims)
self.vert_scat = 0.0
self.norm_scat = 0.0
self.normal_bounds = None
# Code to compute normal vectors from cartesian coordinates
def compute_normal(self, coords=None, vert_scat=None):
"""Converts from data coordinates to the normal vector.
Args
----
coords : ndarray, optional
N x M dimensional array of coordinates. Default is None, which means use the values
stored in the self.coords attribute.
vert_scat : ndarray, optional
M dimensional array of scatter values. Default is None, which means use the values
stored in the self.vert_scat attribute.
Returns
-------
normal : ndarray
N x M dimensional array of normal unit vectors.
norm_scat : ndarray
M dimensional array of scatters normal to the N-1 dimensional plane.
"""
if coords is None:
coords = self.coords.reshape(self.ndims, -1)
if vert_scat is None:
vert_scat = self.vert_scat
alpha = copy.copy(coords)
alpha[self.vertaxis] = -1
beta = coords[self.vertaxis]
normalpha = np.sum(alpha ** 2, axis=0)
unitalpha = alpha / normalpha
normal = -unitalpha * beta
norm_scat = np.fabs(vert_scat / np.sqrt(normalpha))
# print(coords, vert_scat, normal, norm_scat)
return normal, norm_scat
# Code to compute cartesian coordinates from normal vectors
def compute_cartesian(self, normal=None, norm_scat=None):
"""Converts from the normal vector to the data coordinates.
Args
----
normal : ndarray, optional
N x M dimensional array of unit vectors. Default is None, which means use the values
stored in the self.normal attribute.
norm_scat : ndarray, optional
M dimensional array of scatter values normal to the plane. Default is None, which means
use the values stored in the self.norm_scat attribute.
Returns
-------
coords : float
N x M dimensional array of points in the data coordinates.
vert_scat: float
M dimensional array of scatters along the vertical axis of the data.
"""
if normal is None:
normal = self.normal
if norm_scat is None:
norm_scat = self.norm_scat
nTn = np.sum(normal ** 2, axis=0)
coords = -normal / normal[self.vertaxis]
coords[self.vertaxis] = nTn / normal[self.vertaxis]
vert_scat = np.fabs(norm_scat * np.sqrt(nTn) / np.fabs(normal[self.vertaxis]))
return coords, vert_scat
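# Worked 2D example (vertaxis = -1, i.e. y): the line y = 2x + 1 has coords = (2, 1);
# compute_normal gives normal = (-0.4, 0.2), whose length 1/sqrt(5) is the
# perpendicular distance from the origin to the line, and compute_cartesian
# inverts the mapping exactly.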
def bessel_cochran(self, sigma):
"""Corrects the sample scatter to the population scatter using the Bessel and Cochran corrections.
The intrinsic scatter fit from the likelihood is generally not equal to the underlying population scatter
This is 1) because the standard deviation is estimated from a finite number of data samples, and 2) because
the maximum likelihood value of the variance is not the maximum likelihood value of the standard deviation.
These are corrected by the so-called Bessel and Cochran corrections respectively. This function applies these
corrections based on the number of data points and dimensionality of the fitted plane.
Args
----
sigma : ndarray
M dimensional array of scatter values.
Return
------
sigma_corr : ndarray
M dimensional array of corrected scatter values.
"""
sigma_corr = (
np.sqrt(0.5 * self.ndata)
* np.exp(loggamma(0.5 * (self.ndata - self.ndims)) - loggamma(0.5 * (self.ndata - self.ndims + 1.0)))
) * sigma
return sigma_corr
# The posterior function
def _lnpost(self, params):
if params.ndim == 1:
params = params.reshape(-1, len(params))
lnpost = np.sum(self.weights * self._lnlike(params), axis=-1) + self._lnprior(params)
return lnpost
# The flat prior for the fit
def _lnprior(self, params):
lnprior = np.zeros(len(params))
for i, (param, bounds) in enumerate(zip(params.T, self.normal_bounds)):
lnprior += np.where(np.logical_or(param < bounds[0], param > bounds[1]), -np.inf, 0.0)
return lnprior
# The log-likelihood function for each data point
def _lnlike(self, params):
if params.ndim == 1:
params = params.reshape(-1, len(params))
nTn = np.sum(params[:, :-1] ** 2, axis=1)
nTcn = np.einsum("ki,ijd,kj->dk", params[:, :-1], self.cov, params[:, :-1])
orthvariance = params[:, -1] ** 2 + np.where(nTn > 0, nTcn / nTn, 0)
originoffset = np.where(
nTn > 0, np.einsum("ki,id->dk", params[:, :-1], self.data) / np.sqrt(nTn) - np.sqrt(nTn), 0.0
)
lnlike = -0.5 * (np.log(orthvariance) + (originoffset ** 2) / orthvariance)
return lnlike.T
# Convert a sequence of bounds in data coordinates to bounds on the normal vector by optimizing for the maximum
# and minimum values of each normal vector coordinate across the original parameter space.
def _convert_bounds(self, bounds):
normal_bounds = []
for i in range(len(bounds) - 1):
new_min = differential_evolution(
lambda x: self.compute_normal(coords=x[:-1], vert_scat=x[-1])[0][i], bounds, tol=1.0e-6
)["fun"]
new_max = -differential_evolution(
lambda x: -self.compute_normal(coords=x[:-1], vert_scat=x[-1])[0][i], bounds, tol=1.0e-6
)["fun"]
normal_bounds.append((new_min, new_max))
new_min = differential_evolution(
lambda x: self.compute_normal(coords=x[:-1], vert_scat=x[-1])[1], bounds, tol=1.0e-6
)["fun"]
new_max = -differential_evolution(
lambda x: -self.compute_normal(coords=x[:-1], vert_scat=x[-1])[1], bounds, tol=1.0e-6
)["fun"]
normal_bounds.append((new_min, new_max))
normal_bounds = tuple(normal_bounds)
return normal_bounds
def get_sigmas(self, normal=None, norm_scat=None):
"""Calculates the offset between each data point and a plane in
units of the standard deviation, i.e., in terms of x-sigma.
Args
----
normal : ndarray, optional
N x M dimensional array of unit vectors. Default is None, which means use the values
stored in the self.normal attribute.
norm_scat : ndarray, optional
M dimensional array of scatter values normal to the plane. Default is None, which means
use the values stored in the self.norm_scat attribute.
Returns
-------
sigmas: ndarray
D x M dimensional array containing the offsets of the D data points, in units of the
standard deviation from the M models.
"""
if normal is None:
normal = self.normal.reshape(self.ndims, -1)
if norm_scat is None:
norm_scat = self.norm_scat
nTn = np.sum(normal ** 2, axis=0)
nTcn = np.einsum("ik,ijd,jk->dk", normal, self.cov, normal)
orthvariance = norm_scat ** 2 + np.where(nTn > 0, nTcn / nTn, 0)
originoffset = np.where(nTn > 0, np.einsum("ik,id->dk", normal, self.data) / np.sqrt(nTn) - np.sqrt(nTn), 0.0)
return np.sqrt((originoffset ** 2) / orthvariance)
def optimize(self, bounds, tol=1.0e-6, verbose=False):
"""Find the best-fitting line/plane/hyperplane.
Fits the N x D dimensional self.data using scipy.optimize's differential_evolution algorithm. Pretty robust.
Args
----
bounds : sequence
Bounds for variables. Must be a set of N + 1 (min, max) pairs, one for each free parameter,
defining the finite lower and upper bounds. Passed straight through to scipy.differential_evolution
tol: float, optional
The optimisation tolerance.
verbose : bool, optional
If True prints out the full result returned by scipy.optimize.differential_evolution.
Return
------
coords : ndarray
N dimensional array containing the best-fitting parameters.
vert_scat: float
The scatter in the vertical axis, corrected using the Bessel-Cochran correction.
log_posterior: float
The log posterior at the best-fitting parameters.
Raises
------
ValueError: If the number of pairs in 'bounds' is not equal to N + 1.
Note
----
If you want to access the best-fitting parameters in the normal coordinates and the scatter normal to the plane,
these are stored in the self.normal and self.norm_scat class attributes respectively following a call to optimize.
"""
if len(bounds) != self.ndims + 1:
raise ValueError("Number of bounds (min, max) pairs not equal to N dimensions + 1")
self.normal_bounds = self._convert_bounds(bounds)
result = differential_evolution(lambda *args: -self._lnpost(*args), self.normal_bounds, tol=tol)
if verbose:
print(result)
self.normal = result["x"][:-1]
self.norm_scat = np.fabs(result["x"][-1])
self.norm_scat = self.bessel_cochran(self.norm_scat)
self.coords, self.vert_scat = self.compute_cartesian()
return self.coords, self.vert_scat, -np.atleast_1d(result["fun"])[0]
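# A minimal usage sketch (hypothetical data shapes): fit a 2D line with intrinsic
# scatter to 100 points, each with its own diagonal 2x2 covariance matrix.
#   data = np.vstack([x, y])                                 # shape (2, 100)
#   cov = np.zeros((2, 2, 100)); cov[0, 0] = dx**2; cov[1, 1] = dy**2
#   lf = LinFit(data, cov)
#   bounds = ((-5.0, 5.0), (-10.0, 10.0), (1.0e-5, 5.0))     # slope, intercept, vertical scatter
#   coords, vert_scat, log_post = lf.optimize(bounds)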
# A routine run a zeus MCMC on the model given the data
def zeus(self, bounds, max_iter=100000, batchsize=1000, ntau=10.0, tautol=0.05, verbose=False):
"""Run an MCMC on the data using the zeus sampler (Karamanis and Beutler 2020).
The MCMC runs in batches, checking convergence at the end of each batch until either the chain is well converged
or the maximum number of iterations has been reached. Convergence is defined as the point when the chain is longer
than ntau autocorrelation lengths, and the estimate of the autocorrelation length varies less than tautol between batches.
Burn-in is then removed from the samples, before they are flattened and returned.
Args
----
bounds : sequence
Bounds for variables. Must be a set of N + 1 (min, max) pairs, one for each free parameter,
defining the finite lower and upper bounds. Passed straight through to scipy.differential_evolution, and
used to set the prior for the MCMC sampler.
max_iter: int, optional
The maximum number of MCMC iterations.
batchsize : int, optional
The size of each batch, between which we check convergence.
ntau: float, optional
The minimum number of autocorrelation lengths to require before convergence.
tautol: float, optional
The maximum fractional deviation between successive values of the autocorrelation length required for convergence.
verbose: bool, optional
Whether or not to print out convergence statistics and progress.
Return
------
mcmc_samples : ndarray
(N + 1) x Nsamples dimensional array containing the flattened, burnt-in MCMC samples. First N dimensions
are the parameters of the plane. Last dimension is intrinsic scatter in the vertical axis.
mcmc_lnlike : ndarray
Nsamples dimensional array containing the log-likelihood for each MCMC sample.
Raises
------
ValueError: If the number of (min, max) pairs in 'bounds' is not equal to N + 1.
Note
----
Also calls 'optimize' and stores the results in the relevant class attributes if you want to access the best-fit.
"""
if len(bounds) != self.ndims + 1:
raise ValueError("Number of bounds (min, max) pairs not equal to N dimensions + 1")
# Set up Zeus. Start the walkers in a small 1 percent ball around the best fit
self.optimize(bounds)
nwalkers = 4 * (self.ndims + 1)
begin = [
[(0.01 * np.random.rand() + 0.995) * j for j in np.concatenate([self.normal, [self.norm_scat]])]
for _ in range(nwalkers)
]
sampler = zeus.EnsembleSampler(nwalkers, self.ndims + 1, self._lnpost, vectorize=True, verbose=False)
old_tau = np.inf
niter = 0
converged = 0
while ~converged:
sampler.run_mcmc(begin, nsteps=batchsize)
tau = zeus.AutoCorrTime(sampler.get_chain(discard=0.5))
converged = np.all(ntau * tau < niter)
converged &= np.all(np.abs(old_tau - tau) / tau < tautol)
old_tau = tau
begin = None
niter += 1000
if verbose:
print("Niterations/Max Iterations: ", niter, "/", max_iter)
print("Integrated ACT/Min Convergence Iterations: ", tau, "/", np.amax(ntau * tau))
if niter >= max_iter:
break
# Remove burn-in and save the samples
tau = zeus.AutoCorrTime(sampler.get_chain(discard=0.5))
burnin = int(2 * np.max(tau))
samples = sampler.get_chain(discard=burnin, flat=True).T
mcmc_samples = np.vstack(self.compute_cartesian(normal=samples[:-1, :], norm_scat=samples[-1, :]))
mcmc_lnlike = sampler.get_log_prob(discard=burnin, flat=True)
return mcmc_samples, mcmc_lnlike
# A routine run a emcee MCMC on the model given the data
def emcee(self, bounds, max_iter=100000, batchsize=1000, ntau=50.0, tautol=0.05, verbose=False):
"""Run an MCMC on the data using the emcee sampler (Foreman-Mackay et. al., 2013).
The MCMC runs in batches, checking convergence at the end of each batch until either the chain is well converged
or the maximum number of iterations has been reached. Convergence is defined as the point when the chain is longer
than ntau autocorrelation lengths, and the estimate of the autocorrelation length varies less than tautol between batches.
Burn-in is then removed from the samples, before they are flattened and returned.
Args
----
bounds : sequence
Bounds for variables. Must be a set of N + 1 (min, max) pairs, one for each free parameter,
defining the finite lower and upper bounds. Passed straight through to scipy.differential_evolution, and
used to set the prior for the MCMC sampler.
max_iter: int, optional
The maximum number of MCMC iterations.
batchsize : int, optional
The size of each batch, between which we check convergence.
ntau: float, optional
The minimum number of autocorrelation lengths to require before convergence.
tautol: float, optional
The maximum fractional deviation between successive values of the autocorrelation length required for convergence.
verbose: bool, optional
Whether or not to print out convergence statistics and progress.
Return
------
mcmc_samples : ndarray
(N + 1) x Nsamples dimensional array containing the flattened, burnt-in MCMC samples. First N dimensions
are the parameters of the plane. Last dimension is intrinsic scatter in the vertical axis.
mcmc_lnlike : ndarray
Nsamples dimensional array containing the log-likelihood for each MCMC sample.
Raises
------
ValueError: If the number of (min, max) pairs in 'bounds' is not equal to N + 1.
Note
----
Also calls 'optimize' and stores the results in the relevant class attributes if you want to access the best-fit.
"""
if len(bounds) != self.ndims + 1:
raise ValueError("Number of bounds (min, max) pairs not equal to N dimensions + 1")
# Set up emcee. Start the walkers in a small 1 percent ball around the best fit
self.optimize(bounds, verbose=verbose)
nwalkers = 4 * (self.ndims + 1)
begin = [
[(0.01 * np.random.rand() + 0.995) * j for j in np.concatenate([self.normal, [self.norm_scat]])]
for _ in range(nwalkers)
]
sampler = emcee.EnsembleSampler(nwalkers, self.ndims + 1, self._lnpost, vectorize=True)
old_tau = np.inf
niter = 0
converged = 0
while ~converged:
sampler.run_mcmc(begin, nsteps=batchsize, progress=verbose)
tau = sampler.get_autocorr_time(discard=int(0.5 * niter), tol=0)
converged = np.all(ntau * tau < niter)
converged &= np.all(np.abs(old_tau - tau) / tau < tautol)
old_tau = tau
begin = None
niter += 1000
if verbose:
print("Niterations/Max Iterations: ", niter, "/", max_iter)
print("Integrated ACT/Min Convergence Iterations: ", tau, "/", np.amax(ntau * tau))
if niter >= max_iter:
break
# Remove burn-in and save the samples
tau = sampler.get_autocorr_time(discard=int(0.5 * niter), tol=0)
burnin = int(2 * np.max(tau))
samples = sampler.get_chain(discard=burnin, flat=True).T
mcmc_samples = np.vstack(self.compute_cartesian(normal=samples[:-1], norm_scat=samples[-1]))
mcmc_lnlike = sampler.get_log_prob(discard=burnin, flat=True)
return mcmc_samples, mcmc_lnlike
# A routine to run snowline on the model given the data
def snowline(
self,
bounds,
num_global_samples=400,
num_gauss_samples=400,
max_ncalls=100000,
min_ess=400,
max_improvement_loops=4,
heavytail_laplaceapprox=True,
verbose=False,
):
"""Get posterior samples and Bayesian evidence using the snowline package (https://johannesbuchner.github.io/snowline/).
Input kwargs are passed directly to snowline and are named the same, so see the snowline documentation
for more details on these. self.optimize is also called even though snowline runs its own optimisation
to ensure some useful attributes are stored, and for consistency with the emcee and zeus functions.
Args
----
bounds : sequence
Bounds for variables. Must be a set of N + 1 (min, max) pairs, one for each free parameter,
defining the finite lower and upper bounds. Passed straight through to scipy.differential_evolution, and
used to set the prior for the MCMC sampler.
num_global_samples: int, optional
Number of samples to draw from the prior.
num_gauss_samples: int, optional
Number of samples to draw from initial Gaussian likelihood approximation before improving the approximation.
max_ncalls: int, optional
Maximum number of likelihood function evaluations.
min_ess: int, optional
Number of effective samples to draw.
max_improvement_loops: int, optional
Number of times the proposal should be improved.
heavytail_laplaceapprox: bool, optional
If False, use laplace approximation as initial gaussian proposal.
If True, use a gaussian mixture, including the laplace approximation but also wider gaussians.
Return
------
mcmc_samples : ndarray
(N + 1) x Nsamples dimensional array containing the flattened, burnt-in MCMC samples. First N dimensions
are the parameters of the plane. Last dimension is intrinsic scatter in the vertical axis.
mcmc_lnlike : ndarray
Nsamples dimensional array containing the log-likelihood for each MCMC sample.
logz : float
The Bayesian evidence.
logzerr: float
Error on the Bayesian evidence.
Raises
------
ValueError: If the number of (min, max) pairs in 'bounds' is not equal to N + 1.
Note
----
Also calls 'optimize' and stores the results in the relevant class attributes if you want to access the best-fit.
"""
if len(bounds) != self.ndims + 1:
raise ValueError("Number of bounds (min, max) pairs not equal to N dimensions + 1")
# Run the optimizer. Redundant as snowline also does an optimization, but helps set other things up too and store
# some useful attributes, so done for consistency with other routines.
self.optimize(bounds)
paramnames = [f"$\\alpha_{{{i}}}$" for i in range(self.ndims)] + [f"$\\sigma_{{\\perp}}$"]
# Run Snowline
sampler = snowline.ReactiveImportanceSampler(
paramnames, lambda x: self._lnpost(x)[0], transform=self.snowline_transform
)
sampler.run(
num_global_samples=num_global_samples,
num_gauss_samples=num_gauss_samples,
max_ncalls=max_ncalls,
min_ess=min_ess,
max_improvement_loops=max_improvement_loops,
heavytail_laplaceapprox=heavytail_laplaceapprox,
verbose=verbose,
)
samples = sampler.results["samples"].T
mcmc_samples = np.vstack(self.compute_cartesian(normal=samples[:-1], norm_scat=samples[-1]))
mcmc_lnlike = self._lnpost(sampler.results["samples"])
return mcmc_samples, mcmc_lnlike, sampler.results["logz"], sampler.results["logzerr"]
def snowline_transform(self, x):
newx = [
(self.normal_bounds[i][1] - self.normal_bounds[i][0]) * x[i] + self.normal_bounds[i][0]
for i in range(len(x))
]
return | np.array(newx) | numpy.array |
import numpy as np
import xarray as xr
from xrspatial import a_star_search
from xrspatial.tests._crs import _add_EPSG4326_crs_to_da
def test_a_star_search():
agg = xr.DataArray(np.array([[0, 1, 0, 0],
[1, 1, 0, 0],
[0, 1, 2, 2],
[1, 0, 2, 0],
[0, 2, 2, 2]]),
dims=['lat', 'lon'])
# add crs for tests
agg = _add_EPSG4326_crs_to_da(agg)
height, width = agg.shape
_lon = np.linspace(0, width - 1, width)
_lat = np.linspace(0, height - 1, height)
agg['lon'] = _lon
agg['lat'] = _lat
barriers = []
# no barriers, there always path from a start location to a goal location
for x0 in _lon:
for y0 in _lat:
start = (x0, y0)
for x1 in _lon:
for y1 in _lat:
goal = (x1, y1)
path_agg = a_star_search(agg, start, goal, barriers,
'lon', 'lat')
assert isinstance(path_agg, xr.DataArray)
assert type(path_agg.values[0][0]) == np.float64
assert path_agg.shape == agg.shape
assert path_agg.dims == agg.dims
assert path_agg.attrs == agg.attrs
for c in path_agg.coords:
assert (path_agg[c] == agg.coords[c]).all()
if start == goal:
assert np.nanmax(path_agg) == 0 and \
np.nanmin(path_agg) == 0
else:
assert np.nanmax(path_agg) > 0 and \
np.nanmin(path_agg) == 0
barriers = [1]
# set pixels with value 1 as barriers,
# cannot go from (0, 0) to anywhere since it is surrounded by 1s
start = (0, 0)
for x1 in _lon:
for y1 in _lat:
goal = (x1, y1)
if goal != start:
path_agg = a_star_search(agg, start, goal, barriers,
'lon', 'lat')
assert isinstance(path_agg, xr.DataArray)
assert type(path_agg.values[0][0]) == np.float64
assert path_agg.shape == agg.shape
assert path_agg.dims == agg.dims
assert path_agg.attrs == agg.attrs
for c in path_agg.coords:
assert (path_agg[c] == agg.coords[c]).all()
# no path, all cells in path_agg are nans
assert np.isnan(path_agg).all()
# test with nans
agg = xr.DataArray(np.array([[0, 1, 0, 0],
[1, 1, np.nan, 0],
[0, 1, 2, 2],
[1, 0, 2, 0],
[0, np.nan, 2, 2]]),
dims=['lat', 'lon'])
height, width = agg.shape
_lon = np.linspace(0, width - 1, width)
_lat = np.linspace(0, height - 1, height)
agg['lon'] = _lon
agg['lat'] = _lat
# start and end at a nan pixel, coordinate in (lon, lat) format
# in this example, each pixel is a unit of lon and lat,
# start = (2, 1) corresponds to pixel at (1, 2),
# goal = (1, 4) corresponds to pixel at (4, 1)
start = (2, 1)
goal = (1, 4)
# no barriers
barriers = []
# no snap
no_snap_path_agg = a_star_search(agg, start, goal, barriers, 'lon', 'lat')
# no path, all cells in path_agg are nans
assert np.isnan(no_snap_path_agg).all()
# set snap_start = True, snap_goal = False
snap_start_path_agg = a_star_search(agg, start, goal, barriers,
'lon', 'lat', snap_start=True)
# no path, all cells in path_agg are nans
assert np.isnan(snap_start_path_agg).all()
# set snap_start = False, snap_goal = True
snap_goal_path_agg = a_star_search(agg, start, goal, barriers,
'lon', 'lat', snap_goal=True)
# no path, all cells in path_agg are nans
    assert np.isnan(snap_goal_path_agg).all()
import os
import math
import cv2 as cv
import scipy
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import describe, linregress
from scipy.signal import detrend
from matplotlib.animation import FuncAnimation
#~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR IDENTIFYING SURFACE LINE~~~~~~~~~~~~~~~~~~
# these functions help identify the surface line in PLIF images
def _get_frame(cap: cv.VideoCapture, N: int) -> np.ndarray :
"""
Get the Nth frame from the video capture in grayscale
Return the nth frame from an opencv video capture object as greyscale or
None if it fails.
Raises TypeError for some inputs. Raises IndexError if N is out of bounds.
    Raises AssertionError if video capture is not open.
"""
if not isinstance(cap,cv.VideoCapture):
raise TypeError("cap must be an opencv video capture object")
elif not cap.isOpened():
raise AssertionError("cap must be open")
elif not isinstance(N,int):
raise TypeError("N must be an int")
frame_count = cap.get(cv.CAP_PROP_FRAME_COUNT)
# Apparently, frameCount == -2147483648 or -1 for single image sequence
if frame_count < 0:
frame_count = 1
if not 0<=N<frame_count:
raise IndexError("N must be positive and <= frame count of cap")
# cap.set is expensive, only use if needed
if cap.get(cv.CAP_PROP_POS_FRAMES) != N:
cap.set(cv.CAP_PROP_POS_FRAMES, N)
ret_frame, frame = cap.read()
if ret_frame:
if len(frame.shape) == 2:
pass # already greyscale
elif frame.shape[2] == 3:
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
elif frame.shape[2] == 4:
frame = cv.cvtColor(frame, cv.COLOR_BGRA2GRAY)
else:
raise TypeError("video source not supported")
return frame
else:
return None
def _get_grad_phase(src: np.ndarray) -> "tuple of np.ndarray" :
"""
Return the gradient and phase of the grayscale image
Return the gradient and phase of a grayscale image or None if it fails.
Uses Scharr gradient estimation. Normalizes quantites to use the entire
dynamic range of the src image data type.
Raises TypeError for some inputs.
"""
if not isinstance(src,np.ndarray):
raise TypeError("src must be a numpy array")
if not (src.dtype == np.uint8 or src.dtype == np.uint16):
raise TypeError("src must have type np.uint8 or np.uint16")
gradx = cv.Scharr(src, cv.CV_32F, 1, 0, 3)
grady = cv.Scharr(src, cv.CV_32F, 0, 1, 3)
grad = cv.magnitude(gradx, grady)
phase = cv.phase(gradx, grady)
if src.dtype == np.uint8:
kwargs = {'alpha':0,'beta':255,'norm_type':cv.NORM_MINMAX,
'dtype':cv.CV_8UC1}
else: # otherwise np.uint16
kwargs = {'alpha':0,'beta':65535,'norm_type':cv.NORM_MINMAX,
'dtype':cv.CV_16UC1}
grad = cv.normalize(grad , grad , **kwargs)
phase = cv.normalize(phase, phase, **kwargs)
return grad, phase
def _get_mask_from_gradient(src: np.ndarray, k: int) -> np.ndarray :
"""
Identifies large values of an image gradient with a binary mask.
Return a binary mask isolating the values of src that are sufficiently
    large. Sufficiently large is determined by clustering the image into k
    parts, then defining the background as the cluster with the largest number
    of elements. All other clusters are considered sufficiently large and their
locations in the image are marked 1 in the binary mask. The background
is marked 0 in the binary mask.
Raises TypeError for some inputs.
"""
if not isinstance(src,np.ndarray):
raise TypeError("src must be a numpy array")
if not (src.dtype == np.uint8 or src.dtype == np.uint16):
raise TypeError("src must have type np.uint8 or np.uint16")
# Prepare the src for clustering
clusterable = np.array(src.ravel(), dtype=np.float32)
# kmeans requires some initial guess to iteratively improve
    # Using this initial label seems to be more reliable than using PP or random
labels = np.zeros(clusterable.shape, dtype=np.int32)
labels[ np.argwhere(clusterable == clusterable.max()) ] = k-1
# generate and shape label array
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 1.0)
_, labels, centers = cv.kmeans(clusterable, k, labels, criteria, 1,
cv.KMEANS_USE_INITIAL_LABELS)
labels = labels.reshape(-1, src.shape[0])
# exclude the background label from a binary mask where the background label
# has the smallest gradient value among the cluster centers, all other labels
# are included. The background label can be identified by noting that the
# center values are organized like: center[label] = gradient_value
dst = np.ones(src.shape, dtype=src.dtype)
dst[ labels == np.argmin(centers) ] = 0
return dst
def _get_mask_from_phase(src: np.ndarray, mask: np.ndarray,
direction: "'low' or 'high'") -> np.ndarray :
"""
Identifies the low or high phase of an image gradient with a binary mask.
Return a binary mask identifying a low valued cluster or the high valued
    cluster as indicated by the direction input. The background cluster is
assumed to be the cluster with the largest count and is ignored.
Raises a TypeError or a ValueError for some inputs.
"""
if not isinstance(src,np.ndarray):
raise TypeError("src must be a numpy array")
elif not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (src.dtype == np.uint8 or src.dtype == np.uint16):
raise TypeError("src must have type np.uint8 or np.uint16")
elif not (mask.dtype == np.uint8 or mask.dtype == np.uint16):
raise TypeError("mask must have type np.uint8 or np.uint16")
elif not len(src.shape) == len(mask.shape) == 2:
raise ValueError("src and mask must have two dimensions (grayscale)")
elif not (direction == 'low' or direction == 'high'):
raise ValueError("direction must be 'low' or 'high'")
# make them the same dtype but preserve the dynamic range of src
if src.dtype != mask.dtype:
        mask = np.array(mask,dtype=src.dtype)
# identify the foreground cluster with the correct directionality
clusterable = np.array(np.multiply(src,mask).ravel(), dtype=np.float32)
labels = np.zeros(clusterable.shape,dtype=np.int32)
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 1.0)
# phase is normalized to take up the entire dynamic range, so choose to
# split the mask down the middle into an 'low' and 'high' phase
mid = 255//2 if (src.dtype == np.uint8) else 65535//2
# low phase is in the lower half and nonzero
labels[ np.argwhere(np.logical_and(clusterable > 0, clusterable < mid)) ] = 1
# high phase is in the upper half
labels[ np.argwhere(clusterable > mid) ] = 2
# TODO: determine if this clustering actually improves results
# compared to a simple binary threshold
_, labels, centers = cv.kmeans(clusterable, 3, labels, criteria, 1,
cv.KMEANS_USE_INITIAL_LABELS )
labels = np.array(labels.reshape(-1, src.shape[0]), dtype=src.dtype)
# To identify the low and high labels, must also identify the background
# label which is assumed to be the largest group by count
# recall phase data is clustered like: centers[label] = phase_val
label_by_count = np.argsort(np.bincount(labels.ravel()))
label_by_phase = np.argsort(centers.ravel())
background_label = label_by_count[-1]
label_by_phase_excluding_background = np.delete(
label_by_phase, np.where(label_by_phase == background_label))
low_label = label_by_phase_excluding_background[ 0]
high_label = label_by_phase_excluding_background[-1]
choose_label = int(low_label) if direction=='low' else int(high_label)
return cv.compare(labels,(choose_label,0,0,0),cv.CMP_EQ)
def _get_widest_connected_group(mask: np.ndarray) -> np.ndarray:
'''
    Identifies the widest group (uppermost in case of ties) in the binary image.
Find the widest connected group in the binary mask. If there are multiple,
choose the uppermost among them. Requires an uint8 type image but assumes
that the input image is a binary mask (no check).
Raises a TypeError for some inputs.
'''
if not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (mask.dtype == np.uint8):
raise TypeError("mask must have type np.uint8")
num_groups, labels, stats, centroids = \
cv.connectedComponentsWithStats(mask,connectivity=8)
# identify candidates of connected components by area
idx_candidates = np.argsort(stats[:,cv.CC_STAT_AREA])[:-1]
# among the valid candidates, sort by width of connected components
stats_width = stats[idx_candidates,cv.CC_STAT_WIDTH]
widest_groups = np.argwhere(stats_width == np.amax(stats_width))
    # among the widest groups, choose the one closest to the top of the image
# recall that the y axis for images is flipped
top_group = np.argmin(stats[idx_candidates,cv.CC_STAT_TOP][widest_groups])
# create a new mask from the label of the widest & highest cluster
mask_new = np.zeros(labels.shape, dtype=bool)
label = idx_candidates[widest_groups[top_group]]
mask_new[labels == label] = 1
return np.multiply(mask,mask_new)
def _get_mask_maxima(grad: np.ndarray, mask: np.ndarray) -> np.ndarray:
"""
    Finds the local maxima of an image gradient where the mask is 1.
Returns a binary mask where the values are local maxima or a plateau
edge of grad. Applies the input mask before finding the local maxima.
Assumes (no check) that the mask is binary.
Raises a TypeError for some inputs.
"""
if not isinstance(grad,np.ndarray):
raise TypeError("grad must be a numpy array")
elif not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (mask.dtype == np.uint8 or mask.dtype == np.uint16):
raise TypeError("mask must have type np.uint8 or np.uint16")
se = np.array([1,0,1],dtype=np.uint8).reshape(-1,1)
grad_masked = np.multiply(grad,mask)
local_max = cv.dilate(grad_masked, se)
local_max = cv.compare(grad_masked,local_max,cv.CMP_GE)
return np.multiply(local_max,mask)
def _get_surfaceline(mask: np.ndarray, side: "'lower' or 'upper'") \
-> np.ndarray:
"""
    Identifies the surface line from a binary mask.
Returns a 1 dimensional numpy array with the pixel values of the uppermost
or lowermost values in mask.
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(mask,np.ndarray):
raise TypeError("mask must be a numpy array")
elif not (mask.dtype == np.uint8 or mask.dtype == np.uint16):
raise TypeError("mask must have type np.uint8 or np.uint16")
elif not (side=='upper' or side=='lower'):
raise ValueError("direction must be 'low' or 'high'")
# TODO: why convert uint8 or uint16 into binary mask?
# just require a binary array in the first place?
# accept any non-zero value of the mask, mask must be converted to binary
mask = mask>0
n,m = mask.shape
if side=='upper':
args = (0,n,n)
else: # side=='lower'
args = (n,0,n)
weight_y = np.linspace(*args,dtype=int).reshape(-1,1).repeat(m,axis=1)
line = np.argmax(weight_y*mask,axis=0)
# TODO: replace this with numpy functions
# when columns are all 0, line returns an invalid point, replace with -1
for i, j in enumerate(line):
if mask[j,i]==0:
line[i] = -1
return line.ravel()
def _get_supersample(line: np.ndarray, grad: np.ndarray) -> np.ndarray:
"""
    Identifies the supersample interpolation along the surface line of grad.
Returns a tuple of 1 dimensional numpy arrays. The first returns line
with values replaced to be negative if the supersample is invalid. The second
returns the supersample of the gradient or 0 if the supersample is invalid.
Negative values in the first array correspond to the following meanings:
-1 : no identified maxima in column
-2 : identified maxima is not a local maxima (all equal)
-3 : identified maxima is not a local maxima (on a line)
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(line,np.ndarray):
raise TypeError("line must be a numpy array")
elif not isinstance(grad,np.ndarray):
raise TypeError("grad must be a numpy array")
elif not len(line.shape) == 1:
raise ValueError("line must have one dimension")
elif not len(grad.shape) == 2:
raise ValueError("grad must have two dimensions")
supersample = np.zeros(line.shape)
# TODO: replace loop with array operations
for i,j in enumerate(line):
try:
upper = int(grad[j-1,i])
center = int(grad[j ,i])
lower = int(grad[j+1,i])
except IndexError:
line[i] = -1
continue
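        # Three-point parabolic peak interpolation: fitting a parabola through
        # (j-1, upper), (j, center), (j+1, lower), the vertex offset from j is
        # (upper - lower) / (2*upper + 2*lower - 4*center).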
numerator = upper - lower
denominator = 2*upper + 2*lower - 4*center
if j == -1:
pass
elif upper==center and lower==center and upper==lower:
line[i] = -2
elif numerator!=0 and denominator==0:
line[i] = -3
else:
supersample[i] = numerator/denominator
# useful for debugging
#if not np.isfinite(supersample).all():
# print(f"non-finite value at {i}, {j}")
# print(f"numerator: {numerator}")
# print(f"denominator: {denominator}")
# raise ValueError
return line, supersample
# The following functions each handle different combinations of the input
# values to lif(), this is explicit but perhaps too verbose.
def _loop_phase_mask_connected(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
use_column_max = False
use_phase_mask = True
connected = True
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_connected = _get_widest_connected_group(mask_phase)
mask_maxima = _get_mask_maxima(grad,mask)*mask_connected
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask_connected_calibrate(cap: cv.VideoCapture,
num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple,) \
-> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
use_column_max = False
use_phase_mask = True
connected = True
calibration_params = Tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_connected = _get_widest_connected_group(mask_phase)
mask_maxima = _get_mask_maxima(grad,mask)*mask_connected
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
use_column_max = False
use_phase_mask = True
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_maxima = _get_mask_maxima(grad,mask)*mask_phase
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_phase_mask_calibrate(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
        use_column_max = False
        use_phase_mask = True
        connected = False
        calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_phase = _get_mask_from_phase(phase,mask,direction)
mask_maxima = _get_mask_maxima(grad,mask)*mask_phase
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_local_maxima(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
use_column_max = False
use_phase_mask = False
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_maxima = _get_mask_maxima(grad,mask)
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_local_maxima_calibrate(cap: cv.VideoCapture, num_frames: int,
k: int, direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
        use_column_max = False
        use_phase_mask = False
        connected = False
        calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, phase = _get_grad_phase(frame)
mask = _get_mask_from_gradient(grad, k)
mask_maxima = _get_mask_maxima(grad,mask)
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_maxima(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'") -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
use_column_max = True
use_phase_mask = False
connected = False
calibration_params = None
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
grad, _ = _get_grad_phase(frame)
mask_maxima = np.zeros(grad.shape, dtype=np.uint8)
mask_maxima[np.argmax(grad,axis=0),np.arange(width)] = 1
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def _loop_maxima_calibrate(cap: cv.VideoCapture, num_frames: int, k: int,
direction: "'low' or 'high'",
side: "'lower' or 'upper'",
calibration_params: tuple) -> np.ndarray:
'''
Performs LIF for a specific case in the lif function.
Assumes valid input.
    Considers the case:
use_column_max = True
use_phase_mask = False
connected = False
calibration_params = tuple
'''
width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
surface = np.empty((num_frames,width))
for num in range(num_frames):
frame = _get_frame(cap,num)
frame = cv.undistort(frame, calibration_params[0], calibration_params[1])
grad, _ = _get_grad_phase(frame)
mask_maxima = np.zeros(grad.shape, dtype=np.uint8)
mask_maxima[np.argmax(grad,axis=0),np.arange(width)] = 1
line = _get_surfaceline(mask_maxima,side)
line , supersample = _get_supersample(line, grad)
surface[num,:] = line + supersample
return surface
def lif(cap: cv.VideoCapture, direction: "'low' or 'high'",
side: "'lower' or 'upper'", N: "int or None" = None,
calibration_params: "tuple or None" = None, k: int = 3,
use_phase_mask : bool = True, connected : bool = True,
use_column_max : bool = False) -> np.ma.array:
'''
Performs lif analysis on an opencv video capture.
Imports each frame from cap as a grayscale image and performs LIF analysis
on each frame. Returns identified elevation of the surface line as a
numpy array with shape (N,M) where N is as specified or the number of frames
in cap (if unspecified) and M is the width of the images in cap.
The argument 'direction' refers to the direction of the gradient where 'low'
roughly corresponds with pi radians, and 'high' roughly corresponds to 3 pi
radians. The argument 'side' refers to which side of masked regions it will
attempt to identify, where 'lower' is the lowermost index value, and 'upper'
is the uppermost index value within the mask. The argument 'k' allows for
    adjusting the sensitivity when identifying large gradients; higher values of
    k mean more compute time but allow for smaller local gradient maxima. The
    argument calibration_params should be a tuple with two values where the
    first value in the tuple is the camera matrix and the second value is the
    distortion coefficients as in OpenCV's undistort. The argument use_phase_mask
    is a boolean to specify if the phase of the gradient should be used to
    identify the surface. The argument connected is a boolean to specify if the
    identified surface should be connected (will only return a connected surface).
    The argument use_column_max is used to determine if a global maximum should be
    used to identify the surface. If use_column_max is True then use_phase_mask and
connected arguments are ignored.
Raises a TypeError or ValueError for some inputs.
'''
if not isinstance(cap,cv.VideoCapture):
raise TypeError("cap must be an opencv video capture object")
elif not (direction == 'low' or direction == 'high'):
raise ValueError("direction must be 'low' or 'high'")
elif not (side == 'lower' or side == 'upper'):
raise ValueError("side must be 'lower' or 'upper'")
elif not (isinstance(N,int) or N is None):
raise ValueError("N must be an int or None")
elif not (isinstance(k,int) and k>1):
raise ValueError("k must be an int greater than 1")
elif not isinstance(use_phase_mask,bool):
raise ValueError("use_phase_mask must be a bool")
elif not (isinstance(calibration_params,tuple) \
or calibration_params is None):
raise TypeError("calibration_params must be tuple or None")
elif not ( calibration_params is None or (type(calibration_params) is tuple
and len(calibration_params) == 2)):
raise ValueError("calibration_params must be tuple with two values")
elif not isinstance(use_column_max,bool):
raise ValueError("use_column_max must be a bool")
num_frames = int(cap.get(cv.CAP_PROP_FRAME_COUNT)) if N is None else N
if calibration_params is None:
args = (cap,num_frames,k,direction,side)
if use_column_max:
surface = _loop_maxima(*args)
elif use_phase_mask and connected:
surface = _loop_phase_mask_connected(*args)
elif use_phase_mask and not connected:
surface = _loop_phase_mask(*args)
else:
surface = _loop_local_maxima(*args)
else:
args = (cap,num_frames,k,direction,side,calibration_params)
if use_column_max:
surface = _loop_maxima_calibrate(*args)
elif use_phase_mask and connected:
surface = _loop_phase_mask_connected_calibrate(*args)
elif use_phase_mask and not connected:
surface = _loop_phase_mask_calibrate(*args)
else:
surface = _loop_local_maxima_calibrate(*args)
return np.ma.masked_less(surface,0)
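# A minimal usage sketch of lif (file name and parameter values are hypothetical):
#
#   cap = cv.VideoCapture("plif_recording.mp4")
#   surface = lif(cap, direction='low', side='upper', N=100, k=3,
#                 use_phase_mask=True, connected=True)
#   cap.release()
#
# 'surface' is then a masked array of shape (100, frame_width); masked entries
# mark columns where no valid surface point was identified.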
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR PLOTTING~~~~~~~~~~~~~~~~~~
def list_sequence_animation(xdata: list, ydata: list, name: str ='anim',
fig: "None or matplotlib figure" = None,
ax: "None or matplotlib axis" = None,
xlims: "None or tuple" = None,
ylims: "None or tuple" = None ) -> "matplotlib FuncAnimation" :
"""
Write an animation of the provided data.
    Writes out an H264 encoded animation of the data by default. Each data
    series in the lists is animated with a different color, so that overlapping
measurements may be inspected manually.
"""
if not isinstance(xdata, list):
raise TypeError("xdata must be a list")
elif not isinstance(ydata, list):
raise TypeError("ydata must be a list")
elif not isinstance(name, str):
raise TypeError("name must be a string")
elif not (fig is None or isinstance(fig, matplotlib.figure.Figure)):
raise TypeError("fig must be a matplotlib figure")
elif not (ax is None or isinstance(ax, matplotlib.axes.Axes)):
raise TypeError("ax must be a matplotlib axis")
elif not (xlims is None or isinstance(xlims,tuple)):
raise TypeError("xlims must be None or tuple")
elif not (ylims is None or isinstance(ylims,tuple)):
raise TypeError("ylims must be None or tuple")
elif isinstance(xlims,tuple) and not len(xlims)==2:
raise ValueError("xlims must have length 2")
elif isinstance(ylims,tuple) and not len(ylims)==2:
raise ValueError("ylims must have length 2")
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is not None and ax is not None:
pass
else:
return None
if xlims is not None:
ax.set_xlim(xlims)
if ylims is not None:
ax.set_ylim(ylims)
lines = []
for i in range(len(xdata)):
lobj = ax.plot([], [], lw=2, color=colors[i])[0]
lines.append(lobj)
def init():
for line in lines:
line.set_data([],[])
return lines
def animate(t):
for lnum,line in enumerate(lines):
line.set_data(xdata[lnum], ydata[lnum][t,:])
        return lines
num_frames = sorted([y.shape[0]-1 for y in ydata])[0]
anim = FuncAnimation(fig, animate, init_func=init,
frames=num_frames, interval=20, blit=True)
anim.save(name+'.mp4', fps=30, writer='ffmpeg')
return anim
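# A minimal usage sketch of list_sequence_animation (names are illustrative and
# writing the .mp4 requires ffmpeg to be available):
#
#   x = np.arange(surface.shape[1])
#   anim = list_sequence_animation([x], [surface], name='surface_anim',
#                                  ylims=(float(surface.min()), float(surface.max())))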
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR FINDING HOMOGRAPHY~~~~~~~~~~~~~~~~~~
# These functions are designed to help process calibration board images into a
# homography matrix.
def _find_chessboard_points(img: np.ndarray, board_size: tuple,
write_dir: "string or None" = None) -> np.ndarray :
"""
Identify points on a chessboard image
    Identifies the chessboard points in a greyscale image, returning None if it
is not able to find one of the specified size. Will write a sequence of
images with the identified chessboard points to write_dir if a chessboard
is found and write_dir is specified.
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(img,np.ndarray):
raise TypeError("img must be a numpy array")
elif not (len(img.shape)==2):
raise ValueError("img must have two dimensions")
elif not isinstance(board_size,tuple):
raise TypeError("board_size must be a tuple")
elif not (len(board_size)==2):
raise ValueError("board_size must have two items")
elif not (isinstance(write_dir,str) or write_dir is None):
raise TypeError("write_dir must be a str or None")
if isinstance(write_dir,str):
if not os.path.isdir(write_dir):
raise ValueError("write_dir must be a valid directory")
flag, corners = cv.findChessboardCorners(img,board_size)
if flag:
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
image_points = cv.cornerSubPix(img,corners,(11,11),(-1,-1),criteria)
if write_dir is not None: # in this case it must be a valid directory
            print_chessboard_corners(img,image_points,board_size,write_dir)
elif not flag:
return None
return image_points
def _create_worldpoints_grid(board_size: tuple, square_size: 'int or float') \
-> np.ndarray:
""" makes world points for the specified grid """
if not (len(board_size)==2):
raise ValueError("board_size must have two items")
elif not isinstance(board_size[0],(int,float)):
raise TypeError("board_size[0] must be an int or float")
elif not isinstance(board_size[1],(int,float)):
raise TypeError("board_size[1] must be an int or float")
elif not isinstance(square_size,(int,float)):
raise TypeError("square_size must be an int or float")
x = np.arange(0,board_size[0],1,np.float32) * square_size
y = np.arange(0,board_size[1],1,np.float32) * square_size
X, Y = np.meshgrid(x,y)
return np.stack((X.ravel(),Y.ravel()),axis=1)
def find_homography(img: np.ndarray, board_size: tuple,
square_size: 'positive int or float',
ret_points: bool = False) -> tuple :
"""
Attempts to find a homogaphy from a calibration board image.
Finds a homography from a calibration board with board size equal to
or less than the provided size, and greater than or equal to (3,3)
Raises a TypeError or ValueError for some inputs. Raises an
AssertionError if no checkerboard is found in the image.
"""
if not isinstance(img,np.ndarray):
raise TypeError("img must be a numpy array")
elif not (len(img.shape) == 2):
raise ValueError("img must have two dimensions")
elif not isinstance(board_size,tuple):
raise TypeError("board_size must be a tuple")
elif not (len(board_size) == 2):
raise ValueError("board_size must have two items")
elif not isinstance(square_size,(int,float)):
raise TypeError("square_size but be an int or float")
elif not (square_size > 0):
raise ValueError("square_size non-zero and positive")
# generate a list of possible grid sizes
sizes = []
rng = range(board_size[1],3-1,-1)
for width in range(board_size[0],3-1,-1):
sizes.append(zip((width,)*len(rng),rng))
sizes = [item for subzip in sizes for item in subzip]
# increment through sizes until a valid board is found
counter, image_points = 0, None
while image_points is None and counter < len(sizes):
board_size = sizes[counter]
image_points = _find_chessboard_points(img,board_size)
counter += 1
# if a board is not found, raise an error
assert image_points is not None, "unable to find a checkerboard in image"
world_points = _create_worldpoints_grid(board_size,square_size)
H, _ = cv.findHomography(image_points, world_points)
if ret_points:
return H, board_size, image_points, world_points
return H, board_size
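# A minimal usage sketch of find_homography (image path, board size and square
# size are hypothetical):
#
#   board_img = cv.imread("calibration_board.png", cv.IMREAD_GRAYSCALE)
#   H, used_size = find_homography(board_img, board_size=(9, 6), square_size=10.0)
#
# H maps pixel coordinates (u, v, 1) onto the calibration board plane in the
# same units as square_size.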
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR PIXEL TO PHYSICAL~~~~~~~~~~~~~~~~~~
# These functions are designed to help convert pixel location data into
# physical location data.
def _find_lineartrend(xdata: np.ma.MaskedArray, ydata: np.ma.MaskedArray) \
-> np.ndarray :
"""
Identify a linear trend in the data.
Identify the slope of the linear trend for the given xdata and ydata where
    outliers are removed. xdata and ydata must be one dimensional arrays. Outliers
    are points lying more than 4 standard deviations from the mean after detrending. The
return matrix, R, is a rotation matrix with rotation taken about the z axis,
or the optical axis in the case of pixel data.
"""
if not isinstance(xdata,np.ma.MaskedArray):
raise TypeError("xdata must be a numpy masked array")
elif not (len(xdata.shape)==1):
raise ValueError("xdata must have one dimensions")
elif not isinstance(ydata,np.ma.MaskedArray):
raise TypeError("ydata must be a numpy masked array")
elif not (len(ydata.shape)==1):
raise ValueError("ydata must have one dimensions")
elif not (xdata.shape==ydata.shape):
raise ValueError("xdata and ydata must have the same shape")
data = np.ma.column_stack((xdata,ydata))
valid_data = np.ma.compress_rows(data)
y_detrend = detrend(valid_data[:,1])
_, _, mean, var, _, _ = describe(y_detrend)
std = math.sqrt(var)
valid_data[:,1] = np.ma.masked_where(np.abs(y_detrend - mean) > 4*std,
valid_data[:,1])
valid_data = np.ma.compress_rows(valid_data)
slope = linregress(valid_data[:,0],valid_data[:,1])[0]
theta = -np.arctan(slope)
# construct a rotation matrix from the angle
R = np.array([
[np.cos(theta),-np.sin(theta),0],
[np.sin(theta), np.cos(theta),0],
[0 , 0 ,1]
])
return R
def _apply_homography(H: np.ndarray, vdata: np.ndarray) -> tuple :
"""
Apply a homography, H, to pixel data where only v of (u,v,1) is needed.
Apply a homography to pixel data where only v of the (u,v,1) vector
is given. It is assumed that the u coordinate begins at 0.
The resulting vector (x,y,z) is normalized by z to find (x,y,1)
"""
if not isinstance(H,np.ndarray):
raise TypeError("H must be a numpy array")
elif not (H.shape==(3,3)):
raise ValueError("H must have shape (3,3)")
elif not isinstance(vdata,np.ma.MaskedArray):
raise TypeError("vdata must be a numpy masked array")
elif not (len(vdata.shape)==2):
raise ValueError("vdata must have two dimensions")
# build stack of (u,v,1) vectors
N,M = vdata.shape
u, v = np.arange(0,M,1), np.arange(0,N,1)
udata = np.ma.array(np.meshgrid(u,v)[0] ,mask=vdata.mask)
wdata = np.ma.array(np.ones(vdata.shape),mask=vdata.mask)
data = np.ma.stack((udata.ravel(),vdata.ravel(),wdata.ravel()),axis=-1).T
# apply H but ignore columns which have any masked values
valid_data = np.matmul(H,np.ma.compress_cols(data))
# normalize by the second index
for i in range(3):
valid_data[i,:] = np.divide(valid_data[i,:],valid_data[2,:])
# extract valid values into array with original shape
idx = np.ma.array(np.arange(data.shape[1]),mask=vdata.ravel().mask)
valid_idx = np.ma.compressed(idx)
data = np.zeros((2,data.shape[1]))
data[0,valid_idx] = valid_data[0,:]
data[1,valid_idx] = valid_data[1,:]
data = data.reshape(2,N,M)
return np.ma.array(data[0,:,:],mask=vdata.mask), \
np.ma.array(data[1,:,:],mask=vdata.mask)
def _is_rotationmatrix(R: np.ndarray, tol: float = 1e-6) -> bool:
""" returns True if R is a rotation matrix and False otherwise """
if not isinstance(R,np.ndarray):
raise TypeError("R must be a numpy array")
elif not (isinstance(tol,float) and tol > 0):
raise TypeError("tol must be a positive float")
if not (len(R.shape)==2 and R.shape[0]==R.shape[1]):
return False
Rt = np.transpose(R)
Rt_dot_R = np.dot(Rt, R)
I = np.identity(R.shape[0], dtype = R.dtype)
n = np.linalg.norm(I - Rt_dot_R)
return n < tol
def _apply_rotationmatrix(R: "rotation matrix", xdata: np.ndarray,
ydata: np.ndarray) -> tuple :
""" applies the rotation matrix R to the vector (x,y,1) for each item in
xdata and ydata """
if not isinstance(R,np.ndarray):
raise TypeError("R must be a numpy array")
elif not (R.shape==(3,3)):
raise ValueError("R must have shape (3,3)")
elif not _is_rotationmatrix(R):
raise ValueError("R must be a rotation matrix")
elif not isinstance(xdata,np.ma.MaskedArray):
raise TypeError("xdata must be a numpy masked array")
elif not isinstance(ydata,np.ma.MaskedArray):
raise TypeError("ydata must be a numpy masked array")
elif not (xdata.shape==ydata.shape):
raise ValueError("xdata and ydata must have the same shape")
N,M = ydata.shape
mask = ydata.mask
zdata = np.ma.ones((N,M))
data = np.matmul(R,np.stack((xdata.data,ydata.data,zdata.data),axis=0)
.reshape(3,-1)).reshape(3,N,M)
return np.ma.array(data[0,:,:],mask=mask), \
np.ma.array(data[1,:,:],mask=mask)
def find_physical(H: np.ndarray, vdata: np.ma.MaskedArray,
R: 'np.ndarray or None' = None, zero: bool = True) -> tuple :
"""
Finds the physical values associated with the surface line of lif data.
Apply a homography, H, to pixel data then remove linear trends either by
utilizing the specified value of R, or finding a trend when R is None.
    The first index, assumed to be the spatial index, is forced to increase in
    value with increasing index, applying a 180 degree (pi radian) rotation about
the z axis if needed. If the pixel data is taken with positive downward, as
is typical, the y axis will point downward, and the z axis will point 'into
the paper.' The median value of the y data is set to zero by default, as
indicated with zero=True.
    Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(H,np.ndarray):
raise TypeError("H must be a numpy array")
elif not (H.shape==(3,3)):
raise ValueError("H must have shape (3,3)")
elif not isinstance(vdata,np.ma.MaskedArray):
raise TypeError("vdata must be a numpy masked array")
elif not (len(vdata.shape)==2 and vdata.shape[0]>1):
raise ValueError("vdata must have two dimensions and dimension 0 must \
be greater than 1")
elif not (isinstance(R,np.ndarray) or R is None):
raise TypeError("R must be numpy ndarray or None")
elif not isinstance(zero,bool):
raise TypeError("zero must be bool")
xdata, ydata = _apply_homography(H,vdata)
if R is None:
R = _find_lineartrend(xdata[0,:],ydata[0,:])
xdata, ydata = _apply_rotationmatrix(R,xdata,ydata)
idx_sort = sorted([xdata.argmin(),xdata.argmax()])
xleft = xdata.ravel()[idx_sort[0]]
xright = xdata.ravel()[idx_sort[1]]
if zero:
isfine = np.isfinite(ydata[~ydata.mask])
ydata = ydata - np.ma.median(ydata[~ydata.mask][isfine])
return xdata, ydata, R
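# A minimal usage sketch of find_physical, reusing 'H' from find_homography and
# 'surface' from lif (variable names are illustrative):
#
#   xdata, ydata, R = find_physical(H, surface, R=None, zero=True)
#
# xdata and ydata are masked arrays of physical coordinates per frame and pixel
# column; R can be reused for other measurements from the same camera position.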
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR CONFORMING DATA~~~~~~~~~~~~~~~~~~
# These functions are designed to help confrom the unstructured data output by
# find_physical onto structed data arrays by using scipy's implementation
# of Qhull (Delaunay triangulation)
def _find_integermultiple(x: float, dx: float, tol: float = 1e-6) -> int:
"""
Find integer multiple of dx close to x within the interval (0,x).
Returns an integer, q, where q*dx is the largest value within (0,x),
give or take floating point tolerance.
    If x is positive, returns a less positive number, i.e. a smaller number.
    If x is negative, returns a less negative number, i.e. a greater number.
"""
if not isinstance(x,float):
raise TypeError("x must be a float")
elif math.isnan(x):
return float('nan')
elif not isinstance(dx,float):
raise TypeError("dx must be a float")
elif not isinstance(tol,float):
raise TypeError("tol must be a float")
q, r = divmod(abs(x),dx)
if abs(r-dx)<tol:
q += 1
if x<0:
q = -q
return int(q)
def find_interpolation(xdata: np.ndarray, ydata: np.ndarray, dx: float,
tol: float = 1e-6) -> tuple :
"""
Interpolates xdata and ydata onto a structured grid with triangulation.
Given xdata and ydata are 2D arrays where the first index is uniformly
sampled, and the second index in non-uniformly sampled (as generally output
by find_physical), return uniformly sampled data based on linear
interpolation by Delaunay triangulation.
Raises a TypeError or ValueError for some inputs. Raises AssertionError if
the interpolation fails.
"""
if not isinstance(xdata,np.ndarray):
raise TypeError("xdata must a numpy array")
elif not (len(xdata.shape)==2):
raise ValueError("xdata must have two dimensions")
elif not isinstance(ydata,np.ndarray):
raise TypeError("ydata must be a numpy array")
elif not (len(ydata.shape)==2):
raise ValueError("ydata must have two dimensions")
elif not (xdata.shape==ydata.shape):
raise ValueError("xdata and ydata must be the same shape")
elif not isinstance(dx,float):
raise TypeError("dx must be a float")
elif not (dx > 0):
raise ValueError("dx must be a positive float")
elif not isinstance(tol,float):
raise TypeError("tol must be a float")
elif not (tol > 0):
raise ValueError("tol must be a positive float")
from scipy.interpolate import griddata
t = np.repeat(
np.arange(ydata.shape[0])[:,None],ydata.shape[1],axis=-1).ravel()
x = xdata.ravel()
y = ydata.ravel()
for a in [t,x,y]:
assert np.isfinite(a).any(), "invalid data"
ti = np.arange(ydata.shape[0])
P = _find_integermultiple(np.nanmin(xdata),dx)
Q = _find_integermultiple(np.nanmax(xdata),dx)
xi = np.arange(P,Q+1,1)
locs = (ti[:,None],dx*xi[None,:])
# t, x, and y must all be finite for valid index
mask = np.isfinite(np.stack((t,x,y),axis=-1)).all(axis=-1)
points = (t[mask], x[mask])
values = y[mask]
assert mask.any(), "something has gone wrong..."
ynew = griddata(points,values,locs,method='linear',rescale=True)
# ynew lies on a convex hull from griddata
# at the sides of the images there may be columns with
# some nan values, however there will be some interior square
# of the convex hull with no nan values
# there is probably a better way to find this square, but
# here we assume that cropping on just the x axis is enough
mask = np.isfinite(ynew).all(axis=0)
xi_start, xi_stop = xi[mask].min(), xi[mask].max()
xi = np.arange(xi_start,xi_stop+1,1)
ynew = ynew[:,mask]
assert np.isfinite(ynew).all(), "still some nan values..."
return xi.ravel(), ynew, dx
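# A minimal usage sketch of find_interpolation (the dx value is hypothetical);
# masked arrays from find_physical are converted to plain arrays with nans first:
#
#   xi, ynew, dx = find_interpolation(xdata.filled(np.nan),
#                                     ydata.filled(np.nan), dx=0.5)
#
# xi holds integer multiples of dx and ynew is the elevation resampled onto the
# uniform grid, with columns containing nan values cropped away.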
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR COMBINING DATA~~~~~~~~~~~~~~~~~~
# These functions are designed to help merge LIF measurements from two
# or more overlapping locations into a single data set.
def find_adjustment(tdata : tuple, xdata : tuple, ydata : tuple,
numstept=10,numstepx=10,tol=1e-6) -> tuple:
"""
Find best fit of data with temporal and spatial offset in range. Returns
the tuple err, dt, dx.
Finds a temporal and spatial offset to apply to the temporal and spatial
    locations of the lif data such that the corresponding elevation data has
    minimal mean squared difference. find_adjustment takes a brute force approach,
    and will compare the difference in ydata at overlapping tdata and xdata
    locations for all offsets within plus or minus numstept and numstepx. By
    default 441 possible offsets are evaluated. tdata and xdata must be
    integer types in order to find the overlapping tdata and xdata locations.
    Raises a TypeError for some inputs. Raises a ValueError if there is no
    intersection in tdata & xdata.
"""
if not (isinstance(tdata,tuple) and len(tdata)==2):
raise TypeError("tdata must be a tuple with length 2")
elif not (tdata[0].dtype==int and tdata[1].dtype==int):
raise TypeError(f"t in tdata must have dtype int but has dtypes " \
f"{tdata[0].dtype} and {tdata[1].dtype}")
elif not (isinstance(xdata,tuple) and len(xdata)==2):
raise TypeError("xdata must be a tuple with length 2")
elif not (xdata[0].dtype==int and xdata[1].dtype==int):
raise TypeError(f"x in xdata must have dtype int but has dtypes " \
f"{xdata[0].dtype} and {xdata[1].dtype}")
elif not (isinstance(ydata,tuple) and len(ydata)==2):
raise TypeError("ydata must be a tuple with length 2")
# create all possibile pairs of offsets in the range
if numstept == 0:
dt = np.asarray([0],dtype=int)
else:
dt = np.arange(-numstept,numstept+1)
if numstepx == 0:
dx = np.asarray([0],dtype=int)
else:
dx = np.arange(-numstepx,numstepx+1)
DT, DX = tuple(np.meshgrid(dt,dx))
pos = np.transpose(np.stack([DT.ravel(),DX.ravel()]))
# for each possible offset in space and time, estimate the error
err = np.empty(DT.ravel().shape)
err[:] = np.nan # invalid by default
for idx, p in enumerate(pos):
dt, dx = p
_, tidx0, tidx1 = np.intersect1d(tdata[0],tdata[1]+dt,return_indices=True)
_, xidx0, xidx1 = np.intersect1d(xdata[0],xdata[1]+dx,return_indices=True)
# it is possible that dt and dx will push them out of overlapping
# skip in that case (err[idx] = np.nan by default)
if not ( tidx0.size==0 or xidx0.size==0
or tidx1.size==0 or xidx1.size==0 ):
yidx0 = tuple(np.meshgrid(tidx0,xidx0,indexing = 'ij'))
yidx1 = tuple(np.meshgrid(tidx1,xidx1,indexing = 'ij'))
#err[idx] = np.mean(np.abs(ydata[0][yidx0] - ydata[1][yidx1]))
err[idx] = np.mean((ydata[0][yidx0] - ydata[1][yidx1])**2)
# error out if there is no intersection of the data for any offset
if np.isnan(err).all():
raise ValueError("xdata and tdata have no intersection")
idx_min = np.nanargmin(err)
dt, dx = pos[idx_min]
return err[idx_min], dt, dx
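# A minimal usage sketch of find_adjustment for two overlapping measurements,
# where t0, t1, x0, x1 are integer index arrays (e.g. the xi output of
# find_interpolation plus a matching frame index) and y0, y1 are the elevations:
#
#   err, dt, dx = find_adjustment((t0, t1), (x0, x1), (y0, y1),
#                                 numstept=5, numstepx=5)
#   t1 += dt
#   x1 += dx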
def find_weightedoverlap(tdata : tuple,xdata : tuple,ydata : tuple) -> tuple:
"""
Finds a weighted average of elevation data where there is overlap. Returns
the tuple yidx0, yidx1, ylap.
Finds a weighted average of elevation data where the temporal and spatial
data overlap. The weights vary linearly on the spatial axis from each end
    of the intersection. Requires that temporal and spatial data are provided in
    integer format (e.g. the integer n where the associated time is n*dt).
"""
if not isinstance(tdata,tuple):
raise TypeError("tdata must be a tuple")
elif not isinstance(xdata,tuple):
raise TypeError("xdata must be a tuple")
elif not isinstance(ydata,tuple):
raise TypeError("ydata must be a tuple")
elif not (len(tdata) == len(xdata) == len(ydata) == 2):
raise ValueError("tdata, xdata, and ydata must have len of two")
elif not (len(tdata[0].shape) == 1 and len(tdata[1].shape) == 1):
raise ValueError("each item in tdata must have one axis")
elif not (len(xdata[0].shape) == 1 and len(xdata[1].shape) == 1):
raise ValueError("each item in xdata must have one axis")
elif not (len(ydata[0].shape) == 2 and len(ydata[1].shape) == 2):
raise ValueError("each item in ydata must have two axes")
    elif not (np.all(np.diff(tdata[0]) > 0) and np.all(np.diff(tdata[1]) > 0)):
        raise ValueError("each item in tdata must be monotonically increasing")
    elif not (np.all(np.diff(xdata[0]) > 0) and np.all(np.diff(xdata[1]) > 0)):
        raise ValueError("each item in xdata must be monotonically increasing")
elif not (xdata[0].min() < xdata[1].min() and xdata[0].max() < xdata[1].max()):
raise ValueError("xdata[0] must start and end lower in value than xdata[1]")
# Assume uniformly sampled in both time and space
# Assume tdata and xdata are integer type arrays for both items in tuple
_, tidx0, tidx1 = np.intersect1d(tdata[0],tdata[1],return_indices=True)
_, xidx0, xidx1 = np.intersect1d(xdata[0],xdata[1],return_indices=True)
yidx0 = tuple(np.meshgrid(tidx0,xidx0,indexing = 'ij'))
yidx1 = tuple(np.meshgrid(tidx1,xidx1,indexing = 'ij'))
P, Q = len(xdata[0][xidx0]), len(tdata[0][tidx0])
w0 = np.repeat(np.linspace(1,0,P).reshape(1,P),Q,axis=0)
w1 = np.repeat(np.linspace(0,1,P).reshape(1,P),Q,axis=0)
ylap = w0*(ydata[0][yidx0]) + w1*(ydata[1][yidx1])
return yidx0, yidx1, ylap
def list_adjust_data(tdata: list, xdata: list, ydata: list,
copy: bool = True, numstept: int = 10,
numstepx: int = 10) -> tuple:
"""
Returns the recommended adjustments for tdata and xdata as
(adjust_t, adjust_x). By default creates a copy of the data
to modify, otherwise the input data will be modified in place.
"""
# Input checking TBD
# create a copy of each numpy array
if copy:
tdata = [t.copy() for t in tdata]
xdata = [x.copy() for x in xdata]
ydata = [y.copy() for y in ydata]
adjust_t = np.zeros((len(ydata)-1,),dtype=int)
adjust_x = np.zeros((len(ydata)-1,),dtype=int)
for idx, t1, t2, x1, x2, y1, y2 in zip(range(len(tdata)-1),
tdata[:-1],tdata[1:],
xdata[:-1],xdata[1:],
ydata[:-1],ydata[1:]):
_, dt, dx = find_adjustment((t1,t2),(x1,x2),(y1,y2),
numstept=numstept, numstepx=numstepx)
adjust_t[idx] = dt
adjust_x[idx] = dx
t2 += dt
        x2 += dx
return adjust_t, adjust_x
def list_merge_data(tdata: list, xdata: list, ydata: list,
adjust: bool = False) -> tuple:
"""
Returns the tuple t, x, y where each is a single np.ndarray merged
from the data provided as a list. If adjust=True, applies an adjustment
offset to tdata and xdata based on the output of find_adjustment
Raises a TypeError or ValueError for some inputs.
"""
if not isinstance(tdata,list):
raise TypeError("tdata must be a list")
elif not isinstance(xdata,list):
raise TypeError("xdata must be a list")
elif not isinstance(ydata,list):
raise TypeError("ydata must be a list")
elif not (len(tdata) == len(xdata) == len(ydata)):
raise ValueError("tdata, xdata, and ydata must have the same length")
elif not isinstance(adjust,bool):
raise TypeError("adjust must be a bool")
# each element should be a numpy array
tdata_type = [ isinstance(t,np.ndarray) for t in tdata ]
xdata_type = [ isinstance(x,np.ndarray) for x in xdata ]
ydata_type = [ isinstance(y,np.ndarray) for y in ydata ]
if not all(tdata_type):
raise TypeError("all elements in tdata must be np.ndarray")
elif not all(xdata_type):
raise TypeError("all elements in xdata must be np.ndarray")
elif not all(ydata_type):
raise TypeError("all elements in ydata must be np.ndarray")
# make sure all y are (N,M), t is (N,) and x is (M,)
shape_compare = [ y.shape == t.shape + x.shape for t,x,y in zip(tdata,xdata,ydata) ]
tdata_shape_len = [ len(t.shape)==1 for t in tdata ]
xdata_shape_len = [ len(x.shape)==1 for x in xdata ]
ydata_shape_len = [ len(y.shape)==2 for y in ydata ]
# make sure location data is monotonically increasing with index
tdata_monotonic = [ np.all(np.diff(t)>0) for t in tdata ]
xdata_monotonic = [ np.all(np.diff(x)>0) for x in xdata ]
if not all(shape_compare):
raise ValueError("shape must match all data")
elif not all(tdata_shape_len):
raise ValueError("each item in tdata must have 1 axis")
elif not all(xdata_shape_len):
raise ValueError("each item in xdata must have 1 axis")
elif not all(ydata_shape_len):
raise ValueError("each item in ydata must have 2 axes")
elif not all(tdata_monotonic):
raise ValueError("each item in tdata must be monotonically increasing")
elif not all(xdata_monotonic):
raise ValueError("each item in xdata must be monotonically increasing")
xdata_min = np.array([ x.min() for x in xdata ],dtype=int)
xdata_max = np.array([ x.max() for x in xdata ],dtype=int)
    # each item in xdata should overlap but not lie within its neighbors
    # we have already checked that xdata is monotonically increasing for each
# item in the list, now we must sort the list by min and max
# if they are the same sort, then they are increasing but do not lie
# within each other (they might not overlap at this point)
xdata_min_sortidx = np.argsort(xdata_min)
xdata_max_sortidx = np.argsort(xdata_max)
if not (xdata_min_sortidx == xdata_max_sortidx).all():
raise ValueError("some xdata lies entirely within another, all xdata" \
"must have some unique measurements")
# sort all by increasing xdata
sortidx = xdata_min_sortidx
tdata = [ tdata[idx] for idx in sortidx ]
xdata = [ xdata[idx] for idx in sortidx ]
ydata = [ ydata[idx] for idx in sortidx ]
# now that they are sorted in increasing order, ensure that each
# overlaps with the next
xdata_overlapping = np.greater(xdata_max[sortidx][:-1],xdata_min[sortidx][1:]).all()
if not xdata_overlapping:
raise ValueError("not all xdata are overlapping when sorted")
# this may not be enough checks for data that is not uniformly sampled
    # these checks appear to be enough for data with a step size of 1, i.e.
# all([ np.all(np.diff(t)==1) for t in tdata ]) = True
# all([ np.all(np.diff(x)==1) for x in xdata ]) = True
# so there may be some edge cases that are not tested for if the above
# conditions are not true
if adjust:
for t1, t2, x1, x2, y1, y2 in zip(tdata[:-1],tdata[1:],xdata[:-1],xdata[1:],
ydata[:-1],ydata[1:]):
_, dt, dx = find_adjustment((t1,t2),(x1,x2),(y1,y2))
# assumes that t2 and x2 are references to the arrays so that they may be
# modified in place without fancy footwork in the zip
t2 += dt
x2 += dx
# now find a time array that intersects with all tdata
time = np.intersect1d(tdata[0],tdata[1])
for t in tdata[2:]:
time = np.intersect1d(t,time)
if time.size == 0:
raise ValueError("there is no overlap in tdata")
# reduce data in lists to intersect time exclusively
for idx,t,y in zip(range(len(tdata)),tdata,ydata):
_, tidx, _ = np.intersect1d(t,time,return_indices=True)
tdata[idx] = t[tidx]
ydata[idx] = y[tidx,:]
# replace ydata in overlapping regions
for idx in range(len(tdata)-1):
yidx1, yidx2, ylap = find_weightedoverlap((tdata[idx],tdata[idx+1]),
(xdata[idx],xdata[idx+1]),
(ydata[idx],ydata[idx+1]))
ydata[idx ][yidx1] = ylap
ydata[idx+1][yidx2] = ylap
# combine xdata and ydata into a single array by appending non-overlapping
    # data. Here it is assumed that the overlapping data was included in the
    # previously appended data and that all data is on the same time for axis 0
space = xdata[0]
elevation = ydata[0]
for x1, x2, _, y2 in zip(xdata[:-1],xdata[1:],ydata[:-1],ydata[1:]):
_, _, xidx2 = np.intersect1d(x1,x2,return_indices=True)
xmask2 = np.ones(x2.shape,dtype=bool)
xmask2[xidx2] = False
space = np.append(space,x2[xmask2])
elevation = np.append(elevation,y2[:,xmask2],axis=1)
return time, space, elevation
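# A minimal usage sketch of list_merge_data for three overlapping measurement
# locations (variable names are illustrative):
#
#   time, space, elevation = list_merge_data([t0, t1, t2], [x0, x1, x2],
#                                            [y0, y1, y2], adjust=True)
#
# 'elevation' has shape (len(time), len(space)), with overlapping regions
# replaced by the linearly weighted average from find_weightedoverlap.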
#~~~~~~~~~~~~~~~~~~HELPER FUNCTIONS FOR ESTIMATING DEPTH~~~~~~~~~~~~~~~~~~
# these functions are not complete or well tested
def __find_camerapose(img,board_size,square_size,camera_matrix, dist_coefs):
""" estimate camera pose in the opencv sense"""
    image_points = _find_chessboard_points(img,board_size)
world_points = np.zeros((board_size[0]*board_size[1],3), np.float32)
world_points[:,:2] = np.mgrid[0:board_size[0],0:board_size[1]].T.reshape(-1,2)
_, rvecs, t = cv.solvePnP(world_points, image_points, camera_matrix, dist_coefs)
R, _ = cv.Rodrigues(rvecs)
if R[0][0] < 0:
theta = 3.14
Rtemp = np.array([
[np.cos(theta),-np.sin(theta),0],
[np.sin(theta),np.cos(theta),0],
[0,0,1]
])
R = Rtemp.dot(R)
return R,t
def __find_pixelunitvector(u,v,camera_matrix):
""" estimate unit vector in pixel space """
fx = camera_matrix[0,0]
cx = camera_matrix[0,2]
fy = camera_matrix[1,1]
cy = camera_matrix[1,2]
uvec = np.array([(u-cx)/fx,(v-cy)/fy,1])
return uvec/np.linalg.norm(uvec)
def __apply_snellslaw(ivec,nvec,n1,n2):
""" estimate refraction from snells law """
assert type(ivec) is np.ndarray and ivec.shape == (3,) \
and np.isfinite(ivec).all() \
and type(nvec) is np.ndarray and nvec.shape == (3,) \
and np.isfinite(nvec).all() \
and type(n1) is float and type(n2) is float, 'invalid input'
mu = n1/n2
n_dot_i = nvec.dot(ivec)
tvec = np.sqrt(1-((mu**2)*(1 - n_dot_i**2)))*nvec \
+ mu*(ivec-(n_dot_i*nvec))
    assert __check_snellslaw(ivec,tvec,nvec,n1,n2), f"invalid input with i: {ivec[0]}, j: {ivec[1]}, k:{ivec[2]}"
return tvec
def __check_snellslaw(ivec,tvec,nvec,n1,n2,tol=1e-6):
""" check if vector is estimate of snells law"""
assert type(ivec) is np.ndarray and ivec.shape == (3,) \
and np.isfinite(ivec).all() \
and type(tvec) is np.ndarray and tvec.shape == (3,) \
and np.isfinite(tvec).all() \
and type(nvec) is np.ndarray and nvec.shape == (3,) \
and np.isfinite(nvec).all() \
and type(n1) is float and type(n2) is float and type(tol) is float, 'invalid input'
    return (np.abs(np.cross(nvec,tvec) - (n1/n2)*np.cross(nvec,ivec)) < tol).all()
#!flask/bin/python
import os
import numpy as np
from scipy import signal
from scipy.stats import describe
from tensorflow.keras.models import load_model
from tensorflow.keras.metrics import mean_squared_error
#import tflite_runtime.interpreter as tflite
import tensorflow as tf
# import tensorflow.keras as keras
# from Custom_Layers import Dropout_Live
from joblib import dump, load
cwd = os.path.dirname(os.path.abspath(__file__))
class RunModels(object):
def __init__(self,preload_models,basePath,desktop=False):
self.basePath = basePath
self.preload_models = preload_models
if preload_models == True:
self.pca_gmm_model = load(basePath + "Models/GMM/PCA-GMM.joblib")
#self.cnn_ae_model = load_model(basePath + "Models/Autoencoder/Full/CNN-AE.h5")
#self.ae_model = load_model(basePath + "Models/Autoencoder/Full/AE.h5")
self.cnn_ae_lite_model = tf.lite.Interpreter(model_path=self.basePath + "Models/Autoencoder/Lite/CNN-AE-Lite.tflite")
self.pca_gnb_model = load(basePath + "Models/GNB/PCA-GNB.joblib")
#self.mlp_model = load_model(basePath + "Models/MLP-Classifier/Full/MLP.h5")
#self.cnn_mlp_model = load_model(basePath + "Models/MLP-Classifier/Full/CNN-MLP.h5")
self.cnn_mlp_lite_model = tf.lite.Interpreter(model_path=self.basePath + "Models/MLP-Classifier/Lite/CNN-MLP-Lite.tflite")
def classifier_inference_full(self,values,modelId):
xInference = np.atleast_2d(np.array(values).astype(np.float32))
if self.preload_models:
if 'cnn' in modelId.lower():
model = self.cnn_mlp_model
else:
model = self.mlp_model
else:
if 'cnn' in modelId.lower():
model = load_model(self.basePath + "Models/MLP-Classifier/Full/CNN-MLP.h5")
else:
model = load_model(self.basePath + "Models/MLP-Classifier/Full/MLP.h5")
X_predict = np.atleast_2d(xInference)
if 'cnn' in modelId.lower():
X_predict = X_predict[...,np.newaxis]
predict = model.predict(X_predict)
classification = predict[0,0].astype(float)
return classification
def model_inference_full(self,values,modelId):
        xInference = np.atleast_2d(np.array(values).astype(np.float32))
import numpy as np
import matplotlib.pyplot as plt
# be careful with deep and shallow copies
class Quat(object):
def __init__(self, *args, **kwargs):
self.quatCoef = np.zeros(4, dtype=float)
        # construct with Bunge Euler angles (radians, ZXZ)
if len(args) == 3:
ph1 = args[0]
phi = args[1]
ph2 = args[2]
self.quatCoef[0] = np.cos(phi / 2.0) * np.cos((ph1 + ph2) / 2.0)
self.quatCoef[1] = -np.sin(phi / 2.0) * np.cos((ph1 - ph2) / 2.0)
self.quatCoef[2] = -np.sin(phi / 2.0) * np.sin((ph1 - ph2) / 2.0)
self.quatCoef[3] = -np.cos(phi / 2.0) * np.sin((ph1 + ph2) / 2.0)
        # construct with array of quat coefficients
elif len(args) == 1:
self.quatCoef = args[0]
        # construct with quat coefficients
elif len(args) == 4:
self.quatCoef[0] = args[0]
self.quatCoef[1] = args[1]
self.quatCoef[2] = args[2]
self.quatCoef[3] = args[3]
if (self.quatCoef[0] < 0):
self.quatCoef = self.quatCoef * -1
# overload static method with instance method of same name in object
self.plotIPF = self._plotIPF
@classmethod
def fromAxisAngle(cls, axis, angle):
"""Create a quat object from an axis angle pair
Args:
axis (np.array size 3): Axis of rotation
angle (float): Rotation around axis (radians)
Returns:
Quat: Initialised Quat object
"""
# normalise the axis vector
axis = axis / np.sqrt(np.dot(axis, axis))
# calculate quat coefficients
quatCoef = np.zeros(4, dtype=float)
quatCoef[0] = np.cos(angle / 2)
quatCoef[1:4] = np.sin(angle / 2) * axis
# call constructor
return cls(quatCoef)
def eulerAngles(self):
# See Melcher, <NAME>, <NAME>, <NAME>, B. Conversion of EBSD data by a
# quaternion based algorithm to be used for grain structure simulations
# or
# Rowenhorst, D et al. Consistent representations of and conversions between 3D rotations
# P = +1
eulers = np.empty(3, dtype=float)
q = self.quatCoef
q03 = q[0]**2 + q[3]**2
q12 = q[1]**2 + q[2]**2
chi = np.sqrt(q03 * q12)
if (chi == 0 and q12 == 0):
eulers[0] = np.arctan2(-2 * q[0] * q[3],
q[0]**2 - q[3]**2)
eulers[1] = 0
eulers[2] = 0
elif (chi == 0 and q03 == 0):
eulers[0] = np.arctan2(2 * q[1] * q[2],
q[1]**2 - q[2]**2)
eulers[1] = np.pi
eulers[2] = 0
else:
cosPh1 = (-q[0] * q[1] - q[2] * q[3]) / chi
sinPh1 = (-q[0] * q[2] + q[1] * q[3]) / chi
cosPhi = q[0]**2 + q[3]**2 - q[1]**2 - q[2]**2
sinPhi = 2 * chi
cosPh2 = (-q[0] * q[1] + q[2] * q[3]) / chi
sinPh2 = (q[1] * q[3] + q[0] * q[2]) / chi
eulers[0] = np.arctan2(sinPh1, cosPh1)
eulers[1] = np.arctan2(sinPhi, cosPhi)
eulers[2] = np.arctan2(sinPh2, cosPh2)
if eulers[0] < 0:
eulers[0] += 2 * np.pi
if eulers[2] < 0:
eulers[2] += 2 * np.pi
return eulers
def rotMatrix(self):
rotMatrix = np.empty((3, 3), dtype=float)
q = self.quatCoef
qbar = q[0]**2 - q[1]**2 - q[2]**2 - q[3]**2
rotMatrix[0, 0] = qbar + 2 * q[1]**2
rotMatrix[0, 1] = 2 * (q[1] * q[2] - q[0] * q[3])
rotMatrix[0, 2] = 2 * (q[1] * q[3] + q[0] * q[2])
rotMatrix[1, 0] = 2 * (q[1] * q[2] + q[0] * q[3])
rotMatrix[1, 1] = qbar + 2 * q[2]**2
rotMatrix[1, 2] = 2 * (q[2] * q[3] - q[0] * q[1])
rotMatrix[2, 0] = 2 * (q[1] * q[3] - q[0] * q[2])
rotMatrix[2, 1] = 2 * (q[2] * q[3] + q[0] * q[1])
rotMatrix[2, 2] = qbar + 2 * q[3]**2
return rotMatrix
# show components when the quat is printed
def __repr__(self):
return "[%.4f, %.4f, %.4f, %.4f]" % (self.quatCoef[0], self.quatCoef[1], self.quatCoef[2], self.quatCoef[3])
def __str__(self):
return "[%.4f, %.4f, %.4f, %.4f]" % (self.quatCoef[0], self.quatCoef[1], self.quatCoef[2], self.quatCoef[3])
def _plotIPF(self, direction, symGroup, **kwargs):
Quat.plotIPF([self], direction, symGroup, **kwargs)
# overload * operator for quaternion product and vector product
def __mul__(self, right):
if isinstance(right, type(self)): # another quat
newQuatCoef = np.zeros(4, dtype=float)
newQuatCoef[0] = (self.quatCoef[0] * right.quatCoef[0] -
np.dot(self.quatCoef[1:4], right.quatCoef[1:4]))
newQuatCoef[1:4] = (self.quatCoef[0] * right.quatCoef[1:4] +
right.quatCoef[0] * self.quatCoef[1:4] +
np.cross(self.quatCoef[1:4], right.quatCoef[1:4]))
return Quat(newQuatCoef)
raise TypeError()
# # overload % operator for dot product
# def __mod__(self, right):
def dot(self, right):
if isinstance(right, type(self)):
return np.dot(self.quatCoef, right.quatCoef)
raise TypeError()
# overload + operator
def __add__(self, right):
if isinstance(right, type(self)):
return Quat(self.quatCoef + right.quatCoef)
raise TypeError()
# overload += operator
def __iadd__(self, right):
if isinstance(right, type(self)):
self.quatCoef += right.quatCoef
return self
raise TypeError()
# allow array like setting/getting of components
def __getitem__(self, key):
return self.quatCoef[key]
def __setitem__(self, key, value):
self.quatCoef[key] = value
return
def norm(self):
return np.sqrt(np.dot(self.quatCoef[0:4], self.quatCoef[0:4]))
def normalise(self):
self.quatCoef /= self.norm()
return
# also the inverse if this is a unit quaternion
@property
def conjugate(self):
return Quat(self.quatCoef[0], -self.quatCoef[1], -self.quatCoef[2], -self.quatCoef[3])
def transformVector(self, vector):
"""Transforms vector by the quaternion. For EBSD quaterions this
is a transformation from sample space to crystal space. Perform
on conjugate of quaternion for crystal to sample.
Args:
vector (numpy.ndarray): Vector to transform
Returns:
numpy.ndarray: Transformed vector
"""
if isinstance(vector, np.ndarray) and vector.shape == (3,):
vectorQuat = Quat(0, vector[0], vector[1], vector[2])
vectorQuatTransformed = (self * vectorQuat) * self.conjugate
vectorTransformed = vectorQuatTransformed.quatCoef[1:4]
return vectorTransformed
raise TypeError("Vector must be a size 3 numpy array.")
def misOri(self, right, symGroup, returnQuat=0):
"""Calculate misorientation angle between 2 orientations taking
into account the symmetries of the crystal structure.
Angle is 2*arccos(output).
Args:
right (quat): Orientation to find misorientation to
symGroup (str): Crystal type (cubic, hexagonal)
returnQuat (int): What to return
Returns:
various: returnQuat = 0 - misorientation
returnQuat = 1 - symmetric equivalent with min misorientation
returnQuat = 2 - both
"""
if isinstance(right, type(self)):
minMisOri = 0  # actually looking for max of this as it is cos of misorientation angle
for sym in Quat.symEqv(symGroup):  # loop over symmetrically equivalent orientations
quatSym = sym * right
currentMisOri = abs(self.dot(quatSym))
if currentMisOri > minMisOri: # keep if misorientation lower
minMisOri = currentMisOri
minQuatSym = quatSym
if returnQuat == 1:
return minQuatSym
elif returnQuat == 2:
return minMisOri, minQuatSym
else:
return minMisOri
raise TypeError("Input must be a quaternion.")
def misOriAxis(self, right):
"""Calculate misorientation axis between 2 orientations.
This does not consider symmetries of the crystal structure.
Args:
right (quat): Orientation to find misorientation axis to
Returns:
numpy.ndarray: axis of misorientation
"""
if isinstance(right, type(self)):
Dq = right * self.conjugate
Dq = Dq.quatCoef
misOriAxis = (2 * Dq[1:4] * np.arccos(Dq[0])) / np.sqrt(1 - np.power(Dq[0], 2))
return misOriAxis
raise TypeError("Input must be a quaternion.")
# Static methods
@staticmethod
def createManyQuats(eulerArray):
"""Create a an array of quats from an array of Euler angles
Args:
eulerArray (array): Size 3 x n x ... x m
"""
ph1 = eulerArray[0]
phi = eulerArray[1]
ph2 = eulerArray[2]
oriShape = eulerArray.shape[1:]
quatComps = np.zeros((4,) + oriShape, dtype=float)
quatComps[0] = np.cos(phi / 2.0) * np.cos((ph1 + ph2) / 2.0)
quatComps[1] = -np.sin(phi / 2.0) * np.cos((ph1 - ph2) / 2.0)
quatComps[2] = -np.sin(phi / 2.0) * np.sin((ph1 - ph2) / 2.0)
quatComps[3] = -np.cos(phi / 2.0) * np.sin((ph1 + ph2) / 2.0)
quats = np.empty(oriShape, dtype=Quat)
for idx in np.ndindex(oriShape):
quats[idx] = Quat(quatComps[(slice(None),) + idx])
# quatComps[(slice(None),) + idx] is equivalent to quatComps[:, idx[0], ..., idx[n]]
return quats
@staticmethod
def calcSymEqvs(quats, symGroup):
syms = Quat.symEqv(symGroup)
quatComps = np.empty((len(syms), 4, len(quats)))
# store quat components in array
for i, quat in enumerate(quats):
quatComps[0, :, i] = quat.quatCoef
# calculate symmetrical equivalents
for i, sym in enumerate(syms[1:], start=1):
# sym[i] * quat for all points (* is quaternion product)
quatComps[i, 0, :] = (quatComps[0, 0, :] * sym[0] - quatComps[0, 1, :] * sym[1] -
quatComps[0, 2, :] * sym[2] - quatComps[0, 3, :] * sym[3])
quatComps[i, 1, :] = (quatComps[0, 0, :] * sym[1] + quatComps[0, 1, :] * sym[0] -
quatComps[0, 2, :] * sym[3] + quatComps[0, 3, :] * sym[2])
quatComps[i, 2, :] = (quatComps[0, 0, :] * sym[2] + quatComps[0, 2, :] * sym[0] -
quatComps[0, 3, :] * sym[1] + quatComps[0, 1, :] * sym[3])
quatComps[i, 3, :] = (quatComps[0, 0, :] * sym[3] + quatComps[0, 3, :] * sym[0] -
quatComps[0, 1, :] * sym[2] + quatComps[0, 2, :] * sym[1])
# swap into positive hemisphere if required
quatComps[i, :, quatComps[i, 0, :] < 0] = -quatComps[i, :, quatComps[i, 0, :] < 0]
return quatComps
@staticmethod
def calcAverageOri(quatComps):
avOri = np.copy(quatComps[0, :, 0])
currMisOris = np.empty(quatComps.shape[0])
for i in range(1, quatComps.shape[2]):
# calculate misorientation between current average and all symmetrical equivalents
# Dot product of each symm quat in quatComps with refOri for point i
currMisOris[:] = abs(np.einsum("ij,j->i", quatComps[:, :, i], avOri))
# find min misorientation with current average then add to it
maxIdx = np.argmax(currMisOris[:])
avOri += quatComps[maxIdx, :, i]
# Convert components back to a quat and normalise
avOri = Quat(avOri)
avOri.normalise()
return avOri
@staticmethod
def calcMisOri(quatComps, refOri):
misOris = np.empty((quatComps.shape[0], quatComps.shape[2]))
# Dot product of each quat in quatComps with refOri
misOris[:, :] = abs(np.einsum("ijk,j->ik", quatComps, refOri.quatCoef))
maxIdxs0 = np.argmax(misOris, axis=0)
maxIdxs1 = np.arange(misOris.shape[1])
minMisOris = misOris[maxIdxs0, maxIdxs1]
minQuatComps = quatComps[maxIdxs0, :, maxIdxs1].transpose()
minMisOris[minMisOris > 1] = 1
return minMisOris, minQuatComps
@staticmethod
def polarAngles(x, y, z):
mod = np.sqrt(x**2 + y**2 + z**2)
x = x / mod
y = y / mod
z = z / mod
# alpha - angle with z axis
alpha = np.arccos(z)
# beta - angle around z axis
beta = np.arctan2(y, x)
return alpha, beta
@staticmethod
def stereoProject(*args):
if len(args) == 3:
alpha, beta = Quat.polarAngles(args[0], args[1], args[2])
elif len(args) == 2:
alpha, beta = args
else:
raise Exception("3 arguments for pole directions and 2 for polar angles.")
alphaComp = np.tan(alpha / 2)
xp = alphaComp * np.cos(beta)
yp = alphaComp * np.sin(beta)
return xp, yp
@staticmethod
def plotLine(startPoint, endPoint, plotSymmetries=False, symGroup=None, res=100, projection=None, ax=None, **kwargs):
if projection is None:
projection = Quat.stereoProject
if ax is None:
ax = plt.gca()
lines = []
lines.append((startPoint, endPoint))
if plotSymmetries:
if symGroup is None:
raise Exception("Please provide a symGroup")
for symm in Quat.symEqv(symGroup)[1:]:
startPointSymm = symm.transformVector(startPoint).astype(int)
endPointSymm = symm.transformVector(endPoint).astype(int)
if startPointSymm[2] < 0:
startPointSymm *= -1
if endPointSymm[2] < 0:
endPointSymm *= -1
lines.append((startPointSymm, endPointSymm))
linePoints = np.zeros((3, res), dtype=float)
for line in lines:
for i in range(3):
if line[0][i] == line[1][i]:
linePoints[i] = np.full(res, line[0][i])
else:
linePoints[i] = np.linspace(line[0][i], line[1][i], res)
"""
Skeleton example of a Ginga local plugin called 'MyLocalPlugin'
To enable it, run ginga with the command
$ ginga --plugins=MyLocalPlugin
it will then be available from the "Operations" button.
"""
from ginga import GingaPlugin
from ginga.gw import Widgets
# import any other modules you want here--it's a python world!
import os
from datetime import datetime as dt
import numpy as np
from ginga import GingaPlugin, RGBImage, colors
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga.util import dp
from ginga.gw.GwHelp import FileSelection
from astropy.io import fits
from astropy.modeling import models, fitting
from scipy import ndimage
import socket
class CSU_initializer(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
"""
This method is called when the plugin is loaded for the first
time. ``fv`` is a reference to the Ginga (reference viewer) shell
and ``fitsimage`` is a reference to the specific ImageViewCanvas
object associated with the channel on which the plugin is being
invoked.
You need to call the superclass initializer and then do any local
initialization.
"""
super(CSU_initializer, self).__init__(fv, fitsimage)
# Load plugin preferences
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_CSU_initializer')
self.settings.setDefaults(ibar_num=1,
mbar_num=1,
ebar_num=1,
move_to_open=False,
bar_dest=0.0,
bar_pos=137.0,
)
self.settings.load(onError='silent')
self.instrument_hosts = ['vm-mosfire', 'nuu', 'vm-mosfirebld']
self.hostname = socket.gethostname().split('.')[0].lower()
self.bars_analysis = None
self.state_analysis = None
self.bars_file = None
self.state_file = None
self.bars_header = None
self.state_header = None
self.layertag = 'bars-canvas'
self.dc = fv.get_draw_classes()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(False)
canvas.set_surface(self.fitsimage)
self.canvas = canvas
self.colornames = colors.get_colors()
self.canvas_img = None
self.mfilesel = FileSelection(self.fv.w.root.get_widget())
## Fit relationship between bar position and pixels
tick = dt.now()
pixels, physical = self.get_data()
self.fit_transforms(pixels, physical)
tock = dt.now()
elapsed = (tock-tick).total_seconds()
# print('Completed fit of transforms in {:.3f} s'.format(elapsed))
## Determine slit angle and bar center to center distance in pixels
## from the transformation and the known longslit positions
## in longslit, bar 02 is at 145.472
## in longslit, bar 92 is at 129.480
physical = [ [145.472, self.bar_to_slit(2)],
[129.480, self.bar_to_slit(92)] ]
pixels = self.physical_to_pixel(physical)
dx = pixels[1][0] - pixels[0][0]
dy = pixels[0][1] - pixels[1][1]
self.slit_angle_pix = np.arctan(dx/dy)
# print("Slit Angle on CCD = {:.3f} deg".format(self.slit_angle_pix * 180./np.pi))
self.slit_height_pix = dy / (self.bar_to_slit(92) - self.bar_to_slit(2))
# print("Slit Height on CCD = {:.3f} pix".format(self.slit_height_pix))
def build_gui(self, container):
"""
This method is called when the plugin is invoked. It builds the
GUI used by the plugin into the widget layout passed as
``container``.
This method may be called many times as the plugin is opened and
closed for modal operations. The method may be omitted if there
is no GUI for the plugin.
This specific example uses the GUI widget set agnostic wrappers
to build the GUI, but you can also just as easily use explicit
toolkit calls here if you only want to support one widget set.
"""
top = Widgets.VBox()
top.set_border_width(4)
# this is a little trick for making plugins that work either in
# a vertical or horizontal orientation. It returns a box container,
# a scroll widget and an orientation ('vertical', 'horizontal')
vbox, sw, orientation = Widgets.get_oriented_box(container)
vbox.set_border_width(4)
vbox.set_spacing(2)
self.msg_font = self.fv.get_font("sansFont", 12)
## -----------------------------------------------------
## Acquire or Load Image
## -----------------------------------------------------
fr = Widgets.Frame("Image the CSU Mask")
vbox.add_widget(fr, stretch=0)
btns1 = Widgets.HBox()
btns1.set_spacing(1)
btn_acq_im = Widgets.Button("Acquire Mask Image")
btn_acq_im.add_callback('activated', lambda w: self.acq_mask_image())
btns1.add_widget(btn_acq_im, stretch=0)
btns1.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns1, stretch=0)
## -----------------------------------------------------
## Analyze Image
## -----------------------------------------------------
fr = Widgets.Frame("Analyze CSU Mask Image")
vbox.add_widget(fr, stretch=0)
btns2 = Widgets.HBox()
btns2.set_spacing(3)
btn_analyze = Widgets.Button("Analyze Mask Image")
btn_analyze.add_callback('activated', lambda w: self.analyze_mask_image())
btns2.add_widget(btn_analyze, stretch=0)
btns2.add_widget(Widgets.Label(''), stretch=1)
btn_overlay = Widgets.Button("Overlay Analysis Results")
btn_overlay.add_callback('activated', lambda w: self.overlay_analysis_results())
btns2.add_widget(btn_overlay, stretch=0)
btns2.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns2, stretch=0)
## -----------------------------------------------------
## Edit Analysis Results
## -----------------------------------------------------
fr = Widgets.Frame("Edit Analysis Results")
captions = [
("Set Bar Number", 'label',\
'set_ebar_num', 'entry',),\
("Set Position", 'label',\
'set_bar_pos', 'entry'),\
("Edit Bar #", 'label',\
'ebar_num', 'llabel',
'to', 'label',
'bar_pos', 'llabel',
"mm", 'label',\
"Edit Bar", 'button'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
ebar_num = int(self.settings.get('ebar_num', 1))
b.ebar_num.set_text('{:2d}'.format(ebar_num))
b.set_ebar_num.set_text('{:2d}'.format(ebar_num))
b.set_ebar_num.add_callback('activated', self.set_ebar_num_cb)
b.set_ebar_num.set_tooltip("Set bar number to move")
bar_pos = float(self.settings.get('bar_pos', 0.0))
b.bar_pos.set_text('{:+.1f}'.format(bar_pos))
b.set_bar_pos.set_text('{:+.1f}'.format(bar_pos))
b.set_bar_pos.add_callback('activated', self.set_bar_pos_cb)
b.set_bar_pos.set_tooltip("Set distance to move bar")
b.edit_bar.add_callback('activated', lambda w: self.edit_bar())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
## -----------------------------------------------------
## Bar Overlay
## -----------------------------------------------------
fr = Widgets.Frame("Bar Positions Overlay")
vbox.add_widget(fr, stretch=0)
btns1 = Widgets.HBox()
btns1.set_spacing(1)
btn_csu_bar_state = Widgets.Button("From csu_bar_state")
btn_csu_bar_state.add_callback('activated', lambda w: self.overlaybars_from_file())
btns1.add_widget(btn_csu_bar_state, stretch=0)
btns1.add_widget(Widgets.Label(''), stretch=1)
btn_fits_header = Widgets.Button("From FITS Header")
btn_fits_header.add_callback('activated', lambda w: self.overlaybars_from_header())
btns1.add_widget(btn_fits_header, stretch=0)
btns1.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns1, stretch=0)
btns2 = Widgets.HBox()
btns2.set_spacing(1)
btn_clear = Widgets.Button("Clear Overlays")
btn_clear.add_callback('activated', lambda w: self.clear_canvas())
btns2.add_widget(btn_clear, stretch=0)
btns2.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns2, stretch=0)
## -----------------------------------------------------
## Initialize Bar
## -----------------------------------------------------
fr = Widgets.Frame("Individual Bar Initialization")
captions = [
("Set Bar Number", 'label',\
'set_ibar_num', 'entry',),\
("Initialize Bar #", 'label',\
'ibar_num', 'llabel',\
"Initialize Bar", 'button',\
"Open Before Init", 'checkbutton'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
ibar_num = int(self.settings.get('ibar_num', 1))
b.ibar_num.set_text('{:2d}'.format(ibar_num))
b.set_ibar_num.set_text('{:2d}'.format(ibar_num))
b.set_ibar_num.add_callback('activated', self.set_ibar_num_cb)
b.set_ibar_num.set_tooltip("Set bar number to initialize")
b.open_before_init.set_tooltip("Move bar to open position before initialization")
open_before_init = self.settings.get('move_to_open', False)
b.open_before_init.set_state(open_before_init)
b.open_before_init.add_callback('activated', self.open_before_init_cb)
b.initialize_bar.add_callback('activated', lambda w: self.initialize_bar())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
## -----------------------------------------------------
## Move Bar
## -----------------------------------------------------
# Frame for instructions and add the text widget with another
# blank widget to stretch as needed to fill empty space
fr = Widgets.Frame("Individual Bar Control")
captions = [
("Set Bar Number", 'label',\
'set_mbar_num', 'entry',),\
("Set Destination", 'label',\
'set_bar_dest', 'entry'),\
("Move Bar #", 'label',\
'mbar_num', 'llabel',
'to', 'label',
'bar_dest', 'llabel',
"mm", 'label',\
"Move Bar", 'button'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
mbar_num = int(self.settings.get('mbar_num', 1))
b.mbar_num.set_text('{:2d}'.format(mbar_num))
b.set_mbar_num.set_text('{:2d}'.format(mbar_num))
b.set_mbar_num.add_callback('activated', self.set_mbar_num_cb)
b.set_mbar_num.set_tooltip("Set bar number to move")
bar_dest = float(self.settings.get('bar_dest', 0.0))
b.bar_dest.set_text('{:+.1f}'.format(bar_dest))
b.set_bar_dest.set_text('{:+.1f}'.format(bar_dest))
b.set_bar_dest.add_callback('activated', self.set_bar_dest_cb)
b.set_bar_dest.set_tooltip("Set distance to move bar")
b.move_bar.add_callback('activated', lambda w: self.move_bar())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
## -----------------------------------------------------
## Spacer
## -----------------------------------------------------
# Add a spacer to stretch the rest of the way to the end of the
# plugin space
spacer = Widgets.Label('')
vbox.add_widget(spacer, stretch=1)
# scroll bars will allow lots of content to be accessed
top.add_widget(sw, stretch=1)
## -----------------------------------------------------
## Bottom
## -----------------------------------------------------
# A button box that is always visible at the bottom
btns_close = Widgets.HBox()
btns_close.set_spacing(3)
# Add a close button for the convenience of the user
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns_close.add_widget(btn, stretch=0)
btns_close.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns_close, stretch=0)
# Add our GUI to the container
container.add_widget(top, stretch=1)
# NOTE: if you are building a GUI using a specific widget toolkit
# (e.g. Qt) GUI calls, you need to extract the widget or layout
# from the non-toolkit specific container wrapper and call on that
# to pack your widget, e.g.:
#cw = container.get_widget()
#cw.addWidget(widget, stretch=1)
def close(self):
"""
Example close method. You can use this method and attach it as a
callback to a button that you place in your GUI to close the plugin
as a convenience to the user.
"""
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
"""
This method is called just after ``build_gui()`` when the plugin
is invoked. This method may be called many times as the plugin is
opened and closed for modal operations. This method may be omitted
in many cases.
"""
# start ruler drawing operation
p_canvas = self.fitsimage.get_canvas()
try:
obj = p_canvas.get_object_by_tag(self.layertag)
except KeyError:
# Add ruler layer
p_canvas.add(self.canvas, tag=self.layertag)
self.resume()
def pause(self):
"""
This method is called when the plugin loses focus.
It should take any actions necessary to stop handling user
interaction events that were initiated in ``start()`` or
``resume()``.
This method may be called many times as the plugin is focused
or defocused. It may be omitted if there is no user event handling
to disable.
"""
pass
def resume(self):
"""
This method is called when the plugin gets focus.
It should take any actions necessary to start handling user
interaction events for the operations that it does.
This method may be called many times as the plugin is focused or
defocused. The method may be omitted if there is no user event
handling to enable.
"""
pass
def stop(self):
"""
This method is called when the plugin is stopped.
It should perform any special clean up necessary to terminate
the operation. The GUI will be destroyed by the plugin manager
so there is no need for the stop method to do that.
This method may be called many times as the plugin is opened and
closed for modal operations, and may be omitted if there is no
special cleanup required when stopping.
"""
pass
def redo(self):
"""
This method is called when the plugin is active and a new
image is loaded into the associated channel. It can optionally
redo the current operation on the new image. This method may be
called many times as new images are loaded while the plugin is
active. This method may be omitted.
"""
pass
def __str__(self):
"""
This method should be provided and should return the lower case
name of the plugin.
"""
return 'CSU Initializer Plugin'
## ------------------------------------------------------------------
## Coordinate Transformation Utilities
## ------------------------------------------------------------------
def slit_to_bars(self, slit):
'''Given a slit number (1-46), return the two bar numbers associated
with that slit.
'''
return (slit*2-1, slit*2)
def bar_to_slit(self, bar):
'''Given a bar number, return the slit associated with that bar.
'''
return int((bar+1)/2)
def pad(self, x):
'''Pad array for affine transformation.
'''
return np.hstack([x, np.ones((x.shape[0], 1))])
def unpad(self, x):
'''Unpad array for affine transformation.
'''
return x[:,:-1]
def fit_transforms(self, pixels, physical):
'''Given a set of pixel coordinates (X, Y) and a set of physical
coordinates (mm, slit), fit the affine transformations (forward and
backward) to convert between the two coordinate systems.
'''
assert pixels.shape[1] == 2
assert physical.shape[1] == 2
assert pixels.shape[0] == physical.shape[0]
# Pad the data with ones, so that our transformation can do translations too
n = pixels.shape[0]
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
X = pad(pixels)
Y = pad(physical)
# Solve the least squares problem X * A = Y
# to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y, rcond=None)
Ainv, res, rank, s = np.linalg.lstsq(Y, X, rcond=None)
# zero numerically negligible entries of each transform
A[np.abs(A) < 1e-10] = 0
Ainv[np.abs(Ainv) < 1e-10] = 0
self.Apixel_to_physical = A
self.Aphysical_to_pixel = Ainv
def pixel_to_physical(self, x):
'''Using the affine transformation determined by `fit_transforms`,
convert a set of pixel coordinates (X, Y) to physical coordinates (mm,
slit).
'''
x = np.array(x)
result = self.unpad(np.dot(self.pad(x), self.Apixel_to_physical))
return result
def physical_to_pixel(self, x):
'''Using the affine transformation determined by `fit_transforms`,
convert a set of physical coordinates (mm, slit) to pixel coordinates
(X, Y).
'''
x = np.array(x)
# -*- coding: utf-8 -*-
#
from __future__ import division
from mpmath import mp
import numpy
import sympy
from .helpers import cartesian_to_spherical_sympy
from ..helpers import untangle, pm_array0, fsd, pm_array, pm
class McLaren(object):
"""
<NAME>,
Optimal Numerical Integration on a Sphere,
Mathematics of Computation, Vol. 17, No. 84. (Oct., 1963), pp. 361-383,
<https://doi.org/10.1090/S0025-5718-1963-0159418-2>.
"""
def __init__(self, index, symbolic=False):
frac = sympy.Rational if symbolic else lambda x, y: x / y
sqrt = sympy.sqrt if symbolic else numpy.sqrt
roots = mp.polyroots if symbolic else numpy.roots
if index == 1:
self.degree = 3
data = [(frac(1, 12), fsd(3, (sqrt(frac(1, 2)), 2)))]
elif index == 2:
self.degree = 5
# Stroud doesn't mention u=1, but it's implied. (After all, this is
# integration on a sphere.)
u = 1
r = frac(1, 2)
s, t = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
data = [
(frac(1, 30), fsd(3, (u, 1))),
(frac(1, 30), pm_array([r, s, t])),
(frac(1, 30), pm_array([t, r, s])),
(frac(1, 30), pm_array([s, t, r])),
]
elif index == 3:
self.degree = 7
# the positive roots of
# z^6 - z^4 + 0.2*z^2 - 1/105 = 0,
# i.e., the square roots of the roots of
# z^3 - z^2 + 0.2*z^1 - 1/105 = 0,
r2, s2, t2 = roots([1, -1, frac(1, 5), -frac(1, 105)])
r = sqrt(r2)
s = sqrt(s2)
t = sqrt(t2)
u = numpy.array([+r, -r, +s, -s, +t, -t])
v = numpy.array([+s, +t, +t, +r, +r, +s])
w = numpy.array([+t, +s, +r, +t, +s, +r])
data = [
(frac(1, 24), numpy.column_stack([+u, +v, +w])),
(frac(1, 24), numpy.column_stack([+u, -v, -w])),
(frac(1, 24), numpy.column_stack([+u, +w, -v])),
'''
Author: <NAME>
E-mail: <EMAIL>
Here is an implementation of an autoencoder with 4 encoder and decoder layers, based on the Restricted Boltzmann Machine (RBM) in TensorFlow according to Hinton's:
[1] <NAME>* and <NAME>. "Reducing the Dimensionality of Data with Neural Networks"
Science 28 Jul 2006: Vol. 313, Issue 5786, pp. 504-507.
'''
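# Usage sketch (a hypothetical example, assuming trainingData is a 2-D numpy array of
# samples x features, scaled as expected by the Gaussian-Bernoulli input layer):
#
#   layer_sizes = np.array([1000, 500, 250, 30])   # 4 encoder layers as in [1]
#   ae = AE_RBM(layer_sizes)
#   errors = ae.train(trainingData)                # layer-wise RBM pre-training + fine tuning
#   reconstruction = ae.recon(trainingData)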
from RBM import RBM
import numpy as np
import tensorflow as tf
from numpy import genfromtxt
class RBM_Weights:
def __init__(self, weights, visBias, hidBias):
self.weights = weights
self.visBias = visBias
self.hidBias = hidBias
def getWeights(self):
return self.weights
def getVisBias(self):
return self.visBias
def getHidBias(self):
return self.hidBias
class AE_RBM:
nEncoderLayers = 4 # number of encoder layers of the autoencoder
sEncoderLayers = None # size of the encoder layers
def __init__ (self, sEncoderLayers):
if(self.nEncoderLayers != sEncoderLayers.shape[0]):
print('Invalid number of size layers')
raise Exception('Autoencoder constructor ERROR !!!')
self.sEncoderLayers = sEncoderLayers
def train(self, trainingData):
rbmList = [] # list RBM's weights
tempData = trainingData
# start RBM's training and get the respective weights
for n in range(self.nEncoderLayers):
if(n==0 or n==(self.nEncoderLayers-1)):
rbm = RBM(tempData, self.sEncoderLayers[n], rbmType='GBRBM')
else:
rbm = RBM(tempData, self.sEncoderLayers[n], rbmType='BBRBM')
print('Start %d RBM training' % (n+1) )
rbm.train(batchSize=100)
[weights, visBias, hidBias] = rbm.getWeights()
rbmList.append(RBM_Weights(weights, visBias, hidBias))
data = tf.convert_to_tensor( tempData, dtype=tf.float32, name='data')
probHid = tf.sigmoid( tf.matmul( data, weights) + hidBias)
hid = tf.cast( tf.greater( probHid, tf.random_uniform( tf.shape(probHid), minval=0, maxval=1, dtype=tf.float32)), dtype=tf.float32)
with tf.Session() as sess:
if((self.nEncoderLayers-1) == n):
tempData = sess.run(probHid)
else:
tempData = sess.run(hid)
# start the fine tuning process
return self.fineTuning( rbmList, trainingData)
def fineTuning(self, rbmList, trainingData):
# create the weight variables
layer_01_Weights = tf.Variable(rbmList[0].getWeights(), dtype=tf.float32, name='layer_01_Weights')
layer_01_VisBias = tf.Variable(rbmList[0].getVisBias(), dtype=tf.float32, name='layer_01_VisBias')
layer_01_HidBias = tf.Variable(rbmList[0].getHidBias(), dtype=tf.float32, name='layer_01_HidBias')
layer_02_Weights = tf.Variable(rbmList[1].getWeights(), dtype=tf.float32, name='layer_02_Weights')
layer_02_VisBias = tf.Variable(rbmList[1].getVisBias(), dtype=tf.float32, name='layer_02_VisBias')
layer_02_HidBias = tf.Variable(rbmList[1].getHidBias(), dtype=tf.float32, name='layer_02_HidBias')
layer_03_Weights = tf.Variable(rbmList[2].getWeights(), dtype=tf.float32, name='layer_03_Weights')
layer_03_VisBias = tf.Variable(rbmList[2].getVisBias(), dtype=tf.float32, name='layer_03_VisBias')
layer_03_HidBias = tf.Variable(rbmList[2].getHidBias(), dtype=tf.float32, name='layer_03_HidBias')
layer_04_Weights = tf.Variable(rbmList[3].getWeights(), dtype=tf.float32, name='layer_04_Weights')
layer_04_VisBias = tf.Variable(rbmList[3].getVisBias(), dtype=tf.float32, name='layer_04_VisBias')
layer_04_HidBias = tf.Variable(rbmList[3].getHidBias(), dtype=tf.float32, name='layer_04_HidBias')
# create some placeholders for the model
probHid_01 = tf.placeholder(dtype=tf.float32, name='probHid_01')
hid_01 = tf.placeholder(dtype=tf.float32, name='hid_01')
probHid_02 = tf.placeholder(dtype=tf.float32, name='probHid_02')
hid_02 = tf.placeholder(dtype=tf.float32, name='hid_02')
probHid_03 = tf.placeholder(dtype=tf.float32, name='probHid_03')
probHid_04 = tf.placeholder(dtype=tf.float32, name='probHid_04')
hid_04 = tf.placeholder(dtype=tf.float32, name='hid_04')
recons_04 = tf.placeholder(dtype=tf.float32, name='recons_04')
recons_03 = tf.placeholder(dtype=tf.float32, name='recons_03')
recons_02 = tf.placeholder(dtype=tf.float32, name='recons_02')
recons_01 = tf.placeholder(dtype=tf.float32, name='recons_01')
data = tf.convert_to_tensor( trainingData, dtype=tf.float32, name='visRecs_01')
# W1_Encoder
probHid_01 = tf.sigmoid( tf.matmul( data, layer_01_Weights) + layer_01_HidBias)
hid_01 = tf.cast( tf.greater( probHid_01, tf.random_uniform( tf.shape(probHid_01), minval=0, maxval=1, dtype=tf.float32)), dtype=tf.float32)
# W2_Encoder
probHid_02 = tf.sigmoid( tf.matmul( hid_01, layer_02_Weights) + layer_02_HidBias)
hid_02 = tf.cast( tf.greater( probHid_02, tf.random_uniform( tf.shape(probHid_02), minval=0, maxval=1, dtype=tf.float32)), dtype=tf.float32)
# W3_Encoder
probHid_03 = tf.sigmoid( tf.matmul( hid_02, layer_03_Weights) + layer_03_HidBias)
# W4_Encoder
probHid_04 = tf.sigmoid( tf.matmul( probHid_03, layer_04_Weights) + layer_04_HidBias)
hid_04 = tf.cast( tf.greater( probHid_04, tf.random_uniform( tf.shape(probHid_04), minval=0, maxval=1, dtype=tf.float32)), dtype=tf.float32)
# W4_Decoder
recons_04 = tf.sigmoid( tf.matmul(hid_04, layer_04_Weights, False, True) + layer_04_VisBias)
# W3_Decoder
recons_03 = tf.sigmoid( tf.matmul(recons_04, layer_03_Weights, False, True) + layer_03_VisBias)
# W2_Decoder
recons_02 = tf.sigmoid( tf.matmul(recons_03, layer_02_Weights, False, True) + layer_02_VisBias)
# W1_Decoder
recons_01 = tf.matmul( recons_02, layer_01_Weights, False, True) + layer_01_VisBias
# cost function
error = tf.losses.mean_squared_error(trainingData, recons_01)
# some tensorflow optimizers
#train_op = tf.train.AdagradOptimizer(0.1).minimize(error)
train_op = tf.train.AdadeltaOptimizer(1).minimize(error)
#train_op = tf.train.GradientDescentOptimizer(0.1).minimize(error)
errorArray = np.array([])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(1000):
_, auxError = sess.run([train_op, error])
errorArray = np.append(errorArray, auxError)
print('IT: %lf Error: %lf'%(i,auxError))
if(i > 200 and auxError < 0.1):
break
self.layer_01_Weights, self.layer_01_VisBias, self.layer_01_HidBias = sess.run( [layer_01_Weights, layer_01_VisBias, layer_01_HidBias])
self.layer_02_Weights, self.layer_02_VisBias, self.layer_02_HidBias = sess.run( [layer_02_Weights, layer_02_VisBias, layer_02_HidBias])
self.layer_03_Weights, self.layer_03_VisBias, self.layer_03_HidBias = sess.run( [layer_03_Weights, layer_03_VisBias, layer_03_HidBias])
self.layer_04_Weights, self.layer_04_VisBias, self.layer_04_HidBias = sess.run( [layer_04_Weights, layer_04_VisBias, layer_04_HidBias])
self.saveTrainedWeights()
return errorArray
def recon(self, trainingData):
# create some placeholders for the model
probHid_01 = tf.placeholder(dtype=tf.float32, name='probHid_01')
hid_01 = tf.placeholder(dtype=tf.float32, name='hid_01')
probHid_02 = tf.placeholder(dtype=tf.float32, name='probHid_02')
hid_02 = tf.placeholder(dtype=tf.float32, name='hid_02')
probHid_03 = tf.placeholder(dtype=tf.float32, name='probHid_03')
probHid_04 = tf.placeholder(dtype=tf.float32, name='probHid_04')
hid_04 = tf.placeholder(dtype=tf.float32, name='hid_04')
# create the weight variables
layer_01_Weights = tf.Variable(self.layer_01_Weights, dtype=tf.float32, name='layer_01_Weights')
layer_01_VisBias = tf.Variable(self.layer_01_VisBias, dtype=tf.float32, name='layer_01_VisBias')
layer_01_HidBias = tf.Variable(self.layer_01_HidBias, dtype=tf.float32, name='layer_01_HidBias')
layer_02_Weights = tf.Variable(self.layer_02_Weights, dtype=tf.float32, name='layer_02_Weights')
layer_02_VisBias = tf.Variable(self.layer_02_VisBias, dtype=tf.float32, name='layer_02_VisBias')
layer_02_HidBias = tf.Variable(self.layer_02_HidBias, dtype=tf.float32, name='layer_02_HidBias')
layer_03_Weights = tf.Variable(self.layer_03_Weights, dtype=tf.float32, name='layer_03_Weights')
layer_03_VisBias = tf.Variable(self.layer_03_VisBias, dtype=tf.float32, name='layer_03_VisBias')
layer_03_HidBias = tf.Variable(self.layer_03_HidBias, dtype=tf.float32, name='layer_03_HidBias')
layer_04_Weights = tf.Variable(self.layer_04_Weights, dtype=tf.float32, name='layer_04_Weights')
layer_04_VisBias = tf.Variable(self.layer_04_VisBias, dtype=tf.float32, name='layer_04_VisBias')
layer_04_HidBias = tf.Variable(self.layer_04_HidBias, dtype=tf.float32, name='layer_04_HidBias')
data = tf.convert_to_tensor( trainingData, dtype=tf.float32, name='visRecs_01')
# W1_Encoder
probHid_01 = tf.sigmoid( tf.matmul( data, layer_01_Weights) + layer_01_HidBias)
hid_01 = tf.cast( tf.greater( probHid_01, tf.random_uniform( tf.shape(probHid_01), minval=0, maxval=1, dtype=tf.float32)), dtype=tf.float32)
# W2_Encoder
probHid_02 = tf.sigmoid( tf.matmul( hid_01, layer_02_Weights) + layer_02_HidBias)
hid_02 = tf.cast( tf.greater( probHid_02, tf.random_uniform( tf.shape(probHid_02), minval=0, maxval=1, dtype=tf.float32)), dtype=tf.float32)
# W3_Encoder
probHid_03 = tf.sigmoid( tf.matmul( hid_02, layer_03_Weights) + layer_03_HidBias)
# W4_Encoder
probHid_04 = tf.sigmoid( tf.matmul( probHid_03, layer_04_Weights) + layer_04_HidBias)
hid_04 = tf.cast( tf.greater( probHid_04, tf.random_uniform( tf.shape(probHid_04), minval=0, maxval=1, dtype=tf.float32)), dtype=tf.float32)
# W4_Decoder
recons_04 = tf.sigmoid( tf.matmul(hid_04, layer_04_Weights, False, True) + layer_04_VisBias)
# W3_Decoder
recons_03 = tf.sigmoid( tf.matmul(recons_04, layer_03_Weights, False, True) + layer_03_VisBias)
# W2_Decoder
recons_02 = tf.sigmoid( tf.matmul(recons_03, layer_02_Weights, False, True) + layer_02_VisBias)
# W1_Decoder
recons_01 = tf.matmul( recons_02, layer_01_Weights, False, True) + layer_01_VisBias
with tf.Session() as sess:
# run model and return the reconstruction matrix
sess.run(tf.global_variables_initializer())
recons = sess.run(recons_01)
return recons
def loadTrainedWeights(self):
self.layer_01_Weights = genfromtxt('layer_01_Weights.csv', delimiter=' ')
self.layer_01_VisBias = genfromtxt('layer_01_VisBias.csv', delimiter=' ')
self.layer_01_HidBias = genfromtxt('layer_01_HidBias.csv', delimiter=' ')
self.layer_02_Weights = genfromtxt('layer_02_Weights.csv', delimiter=' ')
self.layer_02_VisBias = genfromtxt('layer_02_VisBias.csv', delimiter=' ')
self.layer_02_HidBias = genfromtxt('layer_02_HidBias.csv', delimiter=' ')
self.layer_03_Weights = genfromtxt('layer_03_Weights.csv', delimiter=' ')
self.layer_03_VisBias = genfromtxt('layer_03_VisBias.csv', delimiter=' ')
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
def run_old_oqe(fname, key1, key2, freqrange):
"""
Run old OQE algorithm using capo.
"""
# Legacy functions to load data and estimate power spectrum
import capo
legacy_read_files = capo.miriad.read_files
legacy_group_redundant_bls = capo.red.group_redundant_bls
legacy_oqe = capo.oqe
# (1) Read data from file
s,d,f = legacy_read_files([fname], antstr='all', polstr='xx')
bls = d.keys()
#print("Baseline keys:", bls)
# (1a) Legacy setting to specify whether data are conjugated or not
# PB: Replace this with made up conj array (all set to False)
"""
aa = aipy.cal.get_aa('psa6240_v003', np.array([.15]))
_, conj = legacy_group_redundant_bls(aa.ant_layout)
# conj is a dictionary containing whether bl's are conjugated or not
"""
conj = {bl:False for bl in bls}
# (1b) Build data and flagging dictionaries
data_dict = {}; flg_dict = {}
fmin, fmax = freqrange
for key in bls:
# Use only a restricted band of frequencies (e.g. to avoid RFI)
data_dict[key] = d[key]['xx'][:,fmin:fmax]
flg_dict[key] = np.logical_not(f[key]['xx'][:,fmin:fmax])
# (2) Make dataset object
ds = legacy_oqe.DataSet()
ds.set_data(dsets=data_dict, conj=conj, wgts=flg_dict)
# (3) Calculate unweighted power spectrum
q_I = ds.q_hat(key1, key2, use_cov=False, cov_flagging=False) # unweighted
F_I = ds.get_F(key1, key2, use_cov=False, cov_flagging=False)
M_I, W_I = ds.get_MW(F_I, mode='I')
p_I = ds.p_hat(M_I, q_I)
# (4) Calculate inverse covariance-weighted power spectrum
q = ds.q_hat(key1, key2, use_cov=True, cov_flagging=False) # weighted
F = ds.get_F(key1, key2, use_cov=True, cov_flagging=False)
M, W = ds.get_MW(F, mode='I')
p = ds.p_hat(M, q)
return p_I, p
def run_new_oqe(fname, key1, key2, freqrange):
"""
Run new OQE algorithm using hera_pspec.
"""
from pyuvdata import UVData
import hera_pspec as pspec
# (1) Read data from file
d1 = UVData()
d1.read_miriad(fname)
# (1a) Use only a restricted band of frequencies (e.g. to avoid RFI)
fmin, fmax = freqrange
d1.select(freq_chans=np.arange(fmin, fmax))
# (1b) Build data and flagging lists
d = [d1,]
w = [None for _d in d] # Set weights (None => flags from UVData will be used)
#print("Baseline keys:", d[0].get_antpairs())
# (2) Make PSpecData object
ds = pspec.PSpecData(dsets=d, wgts=w)
# (3) Calculate unweighted power spectrum
ds.set_R('identity')
q_I = ds.q_hat(key1, key2)
F_I = ds.get_G(key1, key2)
M_I, W_I = ds.get_MW(F_I, mode='I')
p_I = ds.p_hat(M_I, q_I)
# (4) Calculate inverse covariance-weighted power spectrum
ds.set_R('iC')
q = ds.q_hat(key1, key2)
F = ds.get_G(key1, key2)
M, W = ds.get_MW(F, mode='I')
p = ds.p_hat(M, q)
#pspec, pairs = ds.pspec(bls, input_data_weight='I', norm='I', verbose=True)
return p_I, p
if __name__ == '__main__':
# Path to datafile
fname = '../data/zen.2458042.12552.xx.HH.uvXAA'
# Baselines to use
key1 = (24, 25)
key2 = (24, 38)
# Frequency channels to include
freqrange = (28, 52)
# Run old OQE
pI_old, p_old = run_old_oqe(fname, key1, key2, freqrange)
print("Old:", p_old.shape)
# Run new OQE
_key1 = (0,) + key1
_key2 = (0,) + key2
pI_new, p_new = run_new_oqe(fname, _key1, _key2, freqrange)
print("New:", p_new.shape)
# Calculate fractional difference of means (averaged over LST)
frac_I = np.mean(pI_new, axis=1).real / np.mean(pI_old, axis=1).real - 1.
frac_iC = np.mean(p_new, axis=1).real / np.mean(p_old, axis=1).real - 1.
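# A minimal follow-up sketch (hypothetical, not part of the original script): report and
# plot the fractional differences per delay channel with the already-imported pyplot.
print("max |frac_I| = %.3e, max |frac_iC| = %.3e" % (np.max(np.abs(frac_I)), np.max(np.abs(frac_iC))))
plt.plot(frac_I, label="unweighted (I)")
plt.plot(frac_iC, label="inverse-covariance weighted")
plt.xlabel("delay channel")
plt.ylabel("fractional difference (new/old - 1)")
plt.legend()
plt.show()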
# %% [markdown]
# ## The goal of this notebook:
# investigate regularization approaches, for now just on the full graph.
# These include:
# - truncate high degree
# - truncate low degree
# - plus c (see the sketch in the next cell)
# - row normalization (Levina paper)
# - others?
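# %% [markdown]
# ##
# A quick sketch of the "plus c" idea (a hypothetical helper, not used by the analysis
# below): add a small constant to every entry of the adjacency matrix before embedding,
# one of the simplest spectral regularizers for sparse graphs.
import numpy as np


def plus_c_regularize(adj, c=None):
    """Return adj + c, with c defaulting to the mean entry of adj."""
    adj = np.asarray(adj, dtype=float)
    if c is None:
        c = adj.sum() / adj.size
    return adj + c


# tiny self-contained check on a random sparse graph
_rng = np.random.default_rng(0)
_toy = (_rng.random((50, 50)) < 0.05).astype(float)
print(plus_c_regularize(_toy).mean())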
# %% [markdown]
# ##
import os
import time
import warnings
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, NodeMixin
from joblib import Parallel, delayed
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.linalg import orthogonal_procrustes
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import squareform
from sklearn.cluster import AgglomerativeClustering
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS, TSNE, Isomap
from sklearn.metrics import adjusted_rand_score, pairwise_distances
from sklearn.utils.testing import ignore_warnings
from tqdm.autonotebook import tqdm
from graspy.cluster import AutoGMMCluster, GaussianCluster
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
select_dimension,
selectSVD,
)
from graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.simulations import rdpg
from graspy.utils import augment_diagonal, binarize, pass_to_ranks
from src.align import Procrustes
from src.cluster import get_paired_inds
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.traverse import (
Cascade,
RandomWalk,
TraverseDispatcher,
to_markov_matrix,
to_path_graph,
to_transmission_matrix,
)
from src.visualization import (
CLASS_COLOR_DICT,
add_connections,
adjplot,
barplot_text,
draw_networkx_nice,
gridmap,
matrixplot,
palplot,
screeplot,
set_axes_equal,
stacked_barplot,
)
from graspy.embed import OmnibusEmbed
from umap import UMAP
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name)
graph_type = "G"
def plot_pairs(
X, labels, model=None, left_pair_inds=None, right_pair_inds=None, equal=False
):
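# Grid of pairwise scatter plots of the embedding dimensions (upper triangle only),
# colored by class label; optional left/right index pairs are joined by connection
# lines to show how well pairs align in the embedding.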
n_dims = X.shape[1]
fig, axs = plt.subplots(
n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)
)
data = pd.DataFrame(data=X)
data["label"] = labels
for i in range(n_dims):
for j in range(n_dims):
ax = axs[i, j]
ax.axis("off")
if i < j:
sns.scatterplot(
data=data,
x=j,
y=i,
ax=ax,
alpha=0.7,
linewidth=0,
s=8,
legend=False,
hue="label",
palette=CLASS_COLOR_DICT,
)
if left_pair_inds is not None and right_pair_inds is not None:
add_connections(
data.iloc[left_pair_inds, j],
data.iloc[right_pair_inds, j],
data.iloc[left_pair_inds, i],
data.iloc[right_pair_inds, i],
ax=ax,
)
plt.tight_layout()
return fig, axs
def lateral_omni(adj, lp_inds, rp_inds, n_components=4):
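# Omnibus-embed the two ipsilateral blocks (L->L, R->R) jointly and the two contralateral
# blocks (L->R, R->L) jointly, after pass-to-ranks, then concatenate the resulting latent
# positions so each paired neuron gets a combined ipsi + contra feature vector.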
left_left_adj = pass_to_ranks(adj[np.ix_(lp_inds, lp_inds)])
right_right_adj = pass_to_ranks(adj[np.ix_(rp_inds, rp_inds)])
omni = OmnibusEmbed(
n_components=n_components, n_elbows=2, check_lcc=False, n_iter=10
)
ipsi_embed = omni.fit_transform([left_left_adj, right_right_adj])
ipsi_embed = np.concatenate(ipsi_embed, axis=-1)
ipsi_embed = np.concatenate(ipsi_embed, axis=0)
left_right_adj = pass_to_ranks(adj[np.ix_(lp_inds, rp_inds)])
right_left_adj = pass_to_ranks(adj[np.ix_(rp_inds, lp_inds)])
omni = OmnibusEmbed(
n_components=n_components, n_elbows=2, check_lcc=False, n_iter=10
)
contra_embed = omni.fit_transform([left_right_adj, right_left_adj])
contra_embed = np.concatenate(contra_embed, axis=-1)
contra_embed = np.concatenate(contra_embed, axis=0)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
# %% [markdown]
# ##
graph_type = "G"
master_mg = load_metagraph(graph_type, version="2020-04-01")
mg = preprocess(
master_mg,
threshold=0,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight="weight",
)
meta = mg.meta
degrees = mg.calculate_degrees()
quant_val = np.quantile(degrees["Total edgesum"], 0.05)
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > quant_val].index
print(quant_val)
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
idx = mg.meta[mg.meta["Pair"].isin(mg.meta.index)].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["pair_td"] = meta["Pair ID"].map(meta.groupby("Pair ID")["Total degree"].mean())
mg = mg.sort_values(["pair_td", "Pair ID"], ascending=False)
meta["inds"] = range(len(meta))
adj = mg.adj.copy()
lp_inds, rp_inds = get_paired_inds(meta)
left_inds = meta[meta["left"]]["inds"]
print(len(mg))
# %% [markdown]
# ## Plot the ipsilateral connectomes
if meta["pair_td"].max() > 0:
meta["pair_td"] = -meta["pair_td"]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
left_meta = meta.iloc[lp_inds]
right_meta = meta.iloc[rp_inds]
plot_kws = dict(
plot_type="scattermap",
sort_class="merge_class",
item_order=["pair_td", "Pair ID"],
colors="merge_class",
palette=CLASS_COLOR_DICT,
ticks=False,
class_order="pair_td",
sizes=(1, 1),
gridline_kws=dict(linewidth=0.2, color="grey", linestyle="--"),
)
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
_, _, top, _ = adjplot(ll_adj, ax=axs[0], meta=left_meta, **plot_kws)
top.set_title(r"L $\to$ L")
_, _, top, _ = adjplot(rr_adj, ax=axs[1], meta=right_meta, **plot_kws)
top.set_title(r"R $\to$ R")
plt.tight_layout()
stashfig("ipsilateral-adj")
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 5 10:00:16 2018
@author: DaniJ
This module is supossed to contain the algorithms and information of Chemical speciation plus sorption.
It is a daughter of Database_SC but it can be used without a database.
[If feasible (question of time), I will keep it apart]
"""
from Database_SC import Database_SC
import numpy as np
from scipy import linalg
import scipy.integrate as integrate
from scipy import optimize
#import scipy as sp
class ChemSys_Surf (Database_SC):
'''
ChemSys is a daughter class from Database_SC which is a daughter class of Database. Hence, they depend on these parameters.
#Note for myself and other contributors: if you add or delete properties or methods of the class, document it here. Otherwise, it gets a little chaotic (speaking from my own experience)
properties:
Faraday_constant
temperature
dielectric_constant
permittivity_free_space
A_activitypar
B_activitypar
universal_gas_constant
ionic_strength_constant
fix_ionic_strength
S
S_electro
names_elec_sorpt
length_names_elec_sorpt
U
A_Borkovec
B_Borkovec
A_Borkovec_columns
A_Borkovec_rows
aq_u_vector
waterdensity
index_related_sorpt_pri
methods:
set_S
set_vector_aqueous_component_value
set_names_electrostatic_variables
set_electro_sorption_stoichiometric_M
set_universal_gas_constant
set_Faraday_constant
set_temperature
set_dielectric_constant
set_constant_ionic_strength
set_permittivity_free_space
calculate_dielectric_constant
calculate_A_activitypar
calculate_B_activitypar
calculate_ionic_strength
calculate_waterdensity
calculate_u_electro
define_system_from_input_and_database
create_S
create_U
remove_electro_mass_from_U
separte_S_into_S1_and_S2
create_electro_sorption_stoichiometric_M
create_stoichiometric_surfacepotential
search_index_list_classlist
search_index_list_listdictionaryreactions
instantiation_step
speciation_Westall1980_CCM # NOTE --> probably speciation_Westall1980_CCM, speciation_Westall1980_TLM can be unified in one algorithm, so far it is kept separated.
speciation_Westall1980_TLM #
create_sorpt_vec
Boltzman_factor_2_psi
Jacobian_Speciation_Westall1980
print_speciation
speciation_Borkovec_1983_DLM
get_z_vector
calculate_log_activity_coefficient_aq_pri_species
calculate_log_activity_coefficient_aq_sec_species
NOTE: Remark that ChemSys_Surf is a daughter class from Database_SC. Therefore, in order to create the pseudo S matrix (The stoichiometric matrix that does not contain the surface potential as unknown). Methods like ...
... set_names_aq_primary_species (names_aq_pri_sp), set_names_aq_secondary_species (names_aq_sec_sp), set_names_sorpt_primary_species (names_sorpt_pri_sp), set_names_sorpt_secondary_species (names_sorpt_sec_sp), set_aq_list_pri_class (list_aq_pri_sp), ...
... set_aq_list_sec_class (list_aq_sec_sp) can be used and must be used. However, it has to be checked that the input given is in accordance with the system itself; that can be done by ???????????
'''
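# Typical workflow (a hypothetical sketch; the database object, species names and component
# values below are placeholders, not data shipped with this module):
#
#   cs = ChemSys_Surf()
#   cs.define_system_from_input_and_database(db, ['H+', 'Na+', 'Cl-'], [1e-7, 1e-3, 1e-3], ['SOH'])
#   cs.create_S(); cs.create_U()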
# Constructor
def __init__(self):
self.Faraday_constant = 96485.3328959 # C/mol
self.temperature = (273.15+25) # It assumed that initially we are at T=25°C and we assume atmospheric pressure for dielectric and other constants
self.universal_gas_constant = 8.314472 # J/(K*mol)
self.permittivity_free_space = 8.854187871e-12 ## Farads = F --> F/m = C^2/(J*m); also called vacuum permittivity, electric constant or distributed capacitance of the vacuum
self.calculate_dielectric_constant()
self.calculate_waterdensity()
self.calculate_A_activitypar()
self.calculate_B_activitypar()
self.ionic_strength_constant = False
pass
# Instantiation of main attributes
def define_system_from_input_and_database (self, database, n_aq_prim, list_aq_val, name_sorpt_pri, List_pri_sorpt_class = None):
'''
Given a database, the list of aqueous primary species, the list of aqueous values for the components associated to the primary species, the list of sorption of primary species
The system is defined.
As extra List_pri_sorpt_class is given to update some species. list_sorpt_pri == list_pri_sorpt_class[i].name for i in length.
'''
# check that list_sorpt_pri is coherent with List_pri_sorpt_class
assert len(n_aq_prim) == len(list_aq_val), \
"The length of the aqueous primary species and the aqueous component values is not equal."
if List_pri_sorpt_class is not None:
assert len(name_sorpt_pri) == len(List_pri_sorpt_class), \
"The length of the sorption primary species and the sorption list classes is not equal."
for i in range(0, len(name_sorpt_pri)):
assert i == name_sorpt_pri.index(List_pri_sorpt_class[i].name), 'The name or order of the list of names of sorption primary species and the list of classes of sorption primary species is not coherent.'
# Instantiation of main attributes (Although not necessary, it is useful to keep sense)
names_aq_pri_sp = n_aq_prim
names_aq_sec_sp = []
list_aq_pri_sp = []
list_aq_sec_sp = []
list_aq_reactions = []
names_sorpt_pri_sp = name_sorpt_pri
names_sorpt_sec_sp = []
if List_pri_sorpt_class is not None:
list_sorpt_pri_sp = List_pri_sorpt_class
else:
list_sorpt_pri_sp = []
list_sorpt_sec_sp = []
list_sorpt_reactions = []
# Drawn the list_aq_pri_sp & list_sorpt_pri_sp(if necessary) from Database
index_list_pri_aq = self.search_index_list_classlist (names_aq_pri_sp, database.names_aq_pri_sp)
for i in index_list_pri_aq:
list_aq_pri_sp.append(database.list_aq_pri_sp[i])
if List_pri_sorpt_class is None:
index_list_sorpt = self.search_index_classlist_list (names_sorpt_pri_sp, database.names_sorpt_pri_sp)
for i in index_list_sorpt:
list_sorpt_pri_sp.append(database.list_sorpt_pri_sp[i])
# Obtain list_aq_reactions, list_aq_sec_sp and names_aq_sec_sp from names_aq_pri_sp
index_aq_reactions, names_aq_sec_sp = self.search_index_list_listdictionaryreactions (names_aq_pri_sp, database.list_aq_reactions)
index_list_sec_aq = self.search_index_list_classlist (names_aq_sec_sp, database.names_aq_sec_sp)
for i in index_list_sec_aq:
list_aq_sec_sp.append(database.list_aq_sec_sp[i])
for i in index_aq_reactions:
list_aq_reactions.append(database.list_aq_reactions[i])
# Obtain list_sorpt_reactions, list_sorpt_sec_sp and names_sorpt_sec_sp from names_aq_pri_sp + names_aq_sec_sp + names_sorpt_pri_sp
index_sorpt_reactions, names_sorpt_sec_sp = self.search_index_list_listdictionaryreactions (names_aq_pri_sp + names_aq_sec_sp + names_sorpt_pri_sp, database.list_sorpt_reactions)
index_list_sec_sorpt = self.search_index_list_classlist (names_sorpt_sec_sp, database.names_sorpt_sec_sp)
for i in index_list_sec_sorpt:
list_sorpt_sec_sp.append(database.list_sorpt_sec_sp[i])
for i in index_sorpt_reactions:
list_sorpt_reactions.append(database.list_sorpt_reactions[i])
# Instantiation of main variables, hence definition of system to study
self.set_names_aq_primary_species (names_aq_pri_sp)
self.set_names_aq_secondary_species (names_aq_sec_sp)
self.set_names_sorpt_primary_species ( names_sorpt_pri_sp)
self.set_names_sorpt_secondary_species (names_sorpt_sec_sp)
self.set_aq_list_pri_class (list_aq_pri_sp)
self.set_aq_list_sec_class (list_aq_sec_sp)
self.set_sorpt_list_pri_class (list_sorpt_pri_sp)
self.set_sorpt_list_sec_class (list_sorpt_sec_sp)
self.set_aq_reactions_list (list_aq_reactions)
self.set_sorpt_reactions_list (list_sorpt_reactions)
self.set_vector_aqueous_component_value(list_aq_val)
def set_constant_ionic_strength (self, givenvalue):
'''
set the ionic_strength to a given value
'''
self.ionic_strength_constant = True
self.fix_ionic_strength = givenvalue
# Matrix_Creation_From_Database
def create_S (self):
# First we create the pseudoS matrix (if it does not exist) which has the following structure:
# Number_aqueous_primary_sp Number_sorption_primary_sp Number_aqueous_secondary_sp Number_sorption_secondary_sp
# n_aqueousR1 | |
# pseudoS = nRn | |
# n_sorptionR1 | Stoichiometric values |
# nRm | |
#
#
# Remark: pseudoS is a matrix that is almost the sorption stoichiometric matrix.
# The order of the columns is given by the Number_aqueous_primary_sp + Number_sorption_primary_sp + Number_aqueous_secondary_sp + Number_sorption_secondary_sp
# The order of the rows is first number of aqueous reactions followed by the number of the sorption reactions.
if not hasattr(self, 'pseudoS'):
self.create_pseudo_S()
# Now the electrostatic variables must be added. These variables are treated as chemical species. They will be introduced between Number_sorption_primary_sp and Number_aqueous_secondary_sp.
#
# Each primary sorption class should have an attribute called type_sorption. The attribute will determine the number of surface potential variables that must be added to the stoichiometric matrix.
# -CCM will add only one.
#
#
# for the number of rows. Reactions that are aqueous have 0 has stoichiometric value. The stoichiometric values for the added surface potential species is obtained by the type of sorption and b the stoichiometric_value and the charge.
if not hasattr(self, 'S_electro') or not hasattr(self, 'pseudoS_length_rows'):
self.create_electro_sorption_stoichiometric_M ()
# defining length and names of columns
self.S_names_columns = self.names_aq_pri_sp + self.names_sorpt_pri_sp + self.names_elec_sorpt + self.names_aq_sec_sp + self.names_sorpt_sec_sp
self.S_length_columns = len(self.pseudoS_names_columns) + len(self.names_elec_sorpt)
# defining length of rows
self.S_length_rows = len(self.list_aq_reactions) + len(self.list_sorpt_reactions)
pseudo_S = self.pseudoS.copy()
S_electro = self.S_electro.copy()
pos_1 = self.length_aq_pri_sp + self.length_sorpt_pri_sp
S = np.concatenate((np.concatenate ((pseudo_S[:,:pos_1], S_electro), axis = 1), pseudo_S[:,pos_1:]), axis = 1)
assert self.S_length_rows == S.shape[0]
assert self.S_length_columns == S.shape[1]
self.S = S
# Creation of the Component matrix, [Westall does not really make a difference between stoichiometric matrix and U matrix, since somehow they are related]
def create_U (self):
if not hasattr(self, 'S'):
self.create_S ()
S1, S2 = self.separte_S_into_S1_and_S2()
npri = self.length_aq_pri_sp +self.length_sorpt_pri_sp + self.length_names_elec_sorpt
I = np.identity(npri)
Stop=-np.matmul(S1.transpose(), linalg.inv(S2.transpose()))
U = np.concatenate((I, Stop), axis=1)
U = self.remove_electro_mass_from_U (U)
self.U = U
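    # Hedged sanity-check sketch (not from the original author): by construction
    # U = [I | -S1^T (S2^T)^-1], so right after the concatenation above (i.e. before
    # remove_electro_mass_from_U zeroes the electrostatic mass-balance entries) the
    # component matrix annihilates the stoichiometric matrix:
    #     U_raw = np.concatenate((I, Stop), axis=1)
    #     assert np.allclose(np.matmul(self.S, U_raw.transpose()), 0)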
# remove_electro_mass_from_U ()
def remove_electro_mass_from_U (self, U):
'''
        This method should only be used inside create_U, not outside it.
'''
npri = self.length_aq_pri_sp +self.length_sorpt_pri_sp
for i in range(0, self.length_names_elec_sorpt):
U[npri, npri] = 0
npri += 1
return U
# Separate matrix from Primary and Secondary species
def separte_S_into_S1_and_S2 (self):
'''
        Separates the S matrix into primary (S1) and secondary (S2) species blocks.
        e.g.:
                     Sp1 Sp2 Sp3
            R1 || x11 x12 x13 ||            || x11 x12 ||           || x13 ||
        S = R2 || x21 x22 x23 ||  into S1 = || x21 x22 ||  and S2 = || x23 ||
            R3 || x31 x32 x33 ||            || x31 x32 ||           || x33 ||
'''
        n_pri = self.length_aq_pri_sp + self.length_sorpt_pri_sp + len(self.names_elec_sorpt)   # renamed from "np" to avoid shadowing the numpy alias
        S1 = self.S[:, 0:n_pri].copy()
        S2 = self.S[:, n_pri:].copy()
return S1, S2
# The stoichiometric matrix derived from sorption species.
def create_electro_sorption_stoichiometric_M (self):
'''
The function assumes that some variables are already defined
'''
# create list of new boltzman surface potential variables from sorption species
self.names_elec_sorpt = []
self.index_related_sorpt_pri = []
for i in range(0,self.length_sorpt_pri_sp):
if hasattr(self.list_sorpt_pri_sp[i], 'type_relation'): # related species should be defined in the list_sorpt_pri_sp after the leading species.
self.index_related_sorpt_pri.append(self.names_sorpt_pri_sp.index(self.list_sorpt_pri_sp[i].type_relation))
elif isinstance(self.list_sorpt_pri_sp[i].names_Boltz_psi, str):
self.names_elec_sorpt.append(self.list_sorpt_pri_sp[i].names_Boltz_psi)
elif isinstance(self.list_sorpt_pri_sp[i].names_Boltz_psi, list):
for j in range(0, len(self.list_sorpt_pri_sp[i].names_Boltz_psi)):
self.names_elec_sorpt.append(self.list_sorpt_pri_sp[i].names_Boltz_psi[j])
self.length_names_elec_sorpt = len(self.names_elec_sorpt)
# Block
if not hasattr(self, 'pseudoS_length_rows'):
# self.pseudoS_length_rows = len(self.list_aq_reactions) + len(self.list_sorpt_reactions)
self.pseudoS_length_rows = self.length_aq_sec_sp + self.length_sorpt_sec_sp
S_electro = np.zeros((self.pseudoS_length_rows, self.length_names_elec_sorpt))
col_position = 0
track_dict = {}
counter = 0
for i in range(0, self.length_sorpt_pri_sp):
if hasattr(self.list_sorpt_pri_sp[i], 'type_relation'): # related species should be defined in the list_sorpt_pri_sp after the leading species.
sub_B = self.create_stoichiometric_surfacepotential (self.names_sorpt_pri_sp[i], self.list_sorpt_pri_sp[self.index_related_sorpt_pri[counter]].type_sorption)
ind_start = track_dict['start_'+ self.names_sorpt_pri_sp[self.index_related_sorpt_pri[counter]]]
ind_end =track_dict['end_'+ self.names_sorpt_pri_sp[self.index_related_sorpt_pri[counter]]]
if len(sub_B.shape) == 1:
S_electro[:, ind_start:ind_end] = S_electro[:, ind_start:ind_end] + sub_B.reshape(sub_B.shape[0],1)
else:
S_electro[:, ind_start:ind_end] = S_electro[:, ind_start:ind_end] + sub_B
counter += 1
else:
sub_B = self.create_stoichiometric_surfacepotential (self.names_sorpt_pri_sp[i], self.list_sorpt_pri_sp[i].type_sorption)
if len(sub_B.shape) == 1:
S_electro[:, col_position] = sub_B
track_dict['start_'+self.names_sorpt_pri_sp[i]] = col_position
col_position += 1
track_dict['end_'+self.names_sorpt_pri_sp[i]] = col_position
elif len(sub_B.shape) == 2:
old_col_position = col_position
col_position = col_position + sub_B.shape[1]
S_electro[:, old_col_position:col_position] = sub_B
track_dict['start_'+self.names_sorpt_pri_sp[i]] = old_col_position
track_dict['end_'+self.names_sorpt_pri_sp[i]] = col_position
self.S_electro = S_electro
# creates stoichiometric blocks
def create_stoichiometric_surfacepotential (self, name_pri_sp, type_sorpt):
        '''
        Creates the stoichiometric block of surface-potential columns associated with the
        primary sorption species "name_pri_sp", according to the surface-complexation model
        given by "type_sorpt" ('CCM', 'DLM' or 'TLM'). Each entry is the sum of charge times
        stoichiometric coefficient of the aqueous species involved in the corresponding sorption reaction.
        '''
if type_sorpt == 'CCM' or type_sorpt == 'DLM':
d = np.zeros((self.length_aq_sec_sp + self.length_sorpt_sec_sp))
for i in range(0, self.length_sorpt_sec_sp):
if self.list_sorpt_reactions[i].is_species_in_reaction (name_pri_sp):
names_species_in_reaction = [*self.list_sorpt_reactions[i].reaction]
summ_charges_times_stoichiometric = 0
for j in names_species_in_reaction:
if j in self.names_aq_pri_sp:
z = self.list_aq_pri_sp[self.names_aq_pri_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
summ_charges_times_stoichiometric = summ_charges_times_stoichiometric + (n*z)
elif j in self.names_aq_sec_sp:
z = self.list_aq_sec_sp[self.names_aq_sec_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
summ_charges_times_stoichiometric = summ_charges_times_stoichiometric + (n*z)
d[self.length_aq_sec_sp + i] = summ_charges_times_stoichiometric
elif type_sorpt == 'TLM':
d = np.zeros(((self.length_aq_sec_sp + self.length_sorpt_sec_sp), 3))
for i in range(0, self.length_sorpt_sec_sp):
if self.list_sorpt_reactions[i].is_species_in_reaction (name_pri_sp):
names_species_in_reaction = [*self.list_sorpt_reactions[i].reaction]
summ_charges_times_stoichiometric_o = 0
summ_charges_times_stoichiometric_b = 0
for j in names_species_in_reaction:
if j in self.names_aq_pri_sp:
z = self.list_aq_pri_sp[self.names_aq_pri_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
if j =='H+' or j == 'OH-':
summ_charges_times_stoichiometric_o = summ_charges_times_stoichiometric_o + (n*z)
else:
summ_charges_times_stoichiometric_b = summ_charges_times_stoichiometric_b + (n*z)
elif j in self.names_aq_sec_sp:
z = self.list_aq_sec_sp[self.names_aq_sec_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
if j =='H+' or j == 'OH-':
summ_charges_times_stoichiometric_o = summ_charges_times_stoichiometric_o + (n*z)
else:
summ_charges_times_stoichiometric_b = summ_charges_times_stoichiometric_b + (n*z)
d[self.length_aq_sec_sp + i, 0] = summ_charges_times_stoichiometric_o
d[self.length_aq_sec_sp + i, 1] = summ_charges_times_stoichiometric_b
return d
def get_z_vector(self):
z =[]
for i in range(0, self.length_aq_pri_sp):
# if type(self.list_aq_pri_sp[i]) == Aq_Species:
z.append(self.list_aq_pri_sp[i].charge)
for i in range(0, self.length_aq_sec_sp):
z.append(self.list_aq_sec_sp[i].charge)
return z
def search_index_list_classlist (self, list1, list2):
'''
        The function returns a list of indices with the position of the elements of list1 in list2. --> E.g. list1 = [a, c], list2 = [a, b, c, d]; the function returns list_indices = [0, 2]
        Precondition 1: len(list1) <= len(list2)
        Precondition 2: list1 is completely included in list2. Otherwise an error occurs.
'''
        assert len(list1) <= len(list2), "List of species in the chemical system must be equal to or smaller than the list of primary species in the database"
list_indices = []
for i in list1:
# appends the index of the list2 that coincide with list1.
list_indices.append(list2.index(i))
return list_indices
def search_index_list_listdictionaryreactions (self, list1, list_dictionaries):
'''
        The function returns two lists. One with the indices of the reactions that occur in the ChemSys_Surf according to the input dictionary, and the other with the secondary species of each reaction.
        Both lists are in agreement. e.g. l_ind_reaction = [0, 4, 6, 9], l_secondary_species = ['A', 'B', 'C', 'F']. From reaction 0 of the database the secondary species obtained is A, from 6 it is C, and so on.
'''
index_reactions = []
name_aq_sec_sp = []
for i in range(0, len(list_dictionaries)):
temp_dict = list_dictionaries[i]
temp_dict_list_keys = list(temp_dict.reaction.keys())
n_s = 0
for j in temp_dict_list_keys:
count = list1.count(j)
if count != 1 and count != 0:
raise ValueError('[ChemSys class, method Index_ReactionsinDatabase] It seems that the name_primary_species property is wrong.')
elif count == 0:
n_s += 1
n_s_name = j
if n_s == 1:
index_reactions.append(i)
name_aq_sec_sp.append(n_s_name)
return index_reactions, name_aq_sec_sp
# Creating first pseudoS
#Setters
# set stoichiometric Matrix
def set_S (self, S, names_species_columns):
self.S = S
self.S_length_rows = S.shape[0]
self.S_length_columns = S.shape[1]
self.S_names_columns = names_species_columns
        assert len(names_species_columns) == self.S_length_columns, 'The number of columns must match the length of the list of strings containing the names of the species.'
# aqueous component vector
def set_vector_aqueous_component_value(self, list_aq_val):
'''
        Sets the vector of total aqueous component values (the mass-balance totals used to build T in the speciation algorithms).
'''
self.aq_u_vector = list_aq_val
# set names_electrostatic_variables
def set_names_electrostatic_variables (self, names_elsctrostatic_var):
'''
        The names of the electrostatic potential variables that must be taken into account.
        Preferably define them using create_electro_sorption_stoichiometric_M,
        since these names, and their number, are in general tied to a surface.
'''
self.names_elec_sorpt = names_elsctrostatic_var
self.length_names_elec_sorpt = len(self.names_elec_sorpt)
# set the stoichiometric matrix given by
def set_electro_sorption_stoichiometric_M (self, S_electro):
'''
        The S_electro matrix has the surface-potential variables as columns and the reactions as rows.
        Preferably define it using create_electro_sorption_stoichiometric_M.
'''
self.S_electro = S_electro
# Faraday constant
def set_Faraday_constant (self, new_value):
'''
The Faraday constant is instantiated with the class. The Faraday constant has the value 96485.33289(59) C mol−1 [Obtained from WIKI: https://en.wikipedia.org/wiki/Faraday_constant]
        The constant relates the elementary charge (the magnitude of the charge of an electron) ['e'] and the Avogadro constant (the number of particles in a mole) [NA]:
F = e * NA
e ≈ 1.60217662×10−19 C
NA ≈ 6.02214086×1023 mol−1
        Note of one of the authors: I do not think that it should be modified, but maybe someone wants to play with the value.
'''
self.Faraday_constant = new_value
# Temperature
def set_temperature(self, new_T):
'''
Temperature is supposed to be given in kelvins.
'''
self.temperature = new_T
# Universal gas constant
def set_universal_gas_constant (self, r_value):
'''
Set the universal gas constant
'''
self.universal_gas_constant = r_value
# dielectric constant
def set_dielectric_constant (self, e_c):
'''
Set the dielectric constant of water
'''
self.dielectric_constant = e_c
def set_permittivity_free_space (self, eo):
'''
        Set the permittivity of free space (also called vacuum permittivity or electric constant).
        Not recommended to be used unless you are sure of what you are doing.
'''
self.permittivity_free_space = eo
# Calculations
# Dielectric constant of water
def calculate_dielectric_constant(self):
'''
Calculates the dielectric constant
        The calculation is based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
'''
self.dielectric_constant = 2727.586 + 0.6224107*self.temperature - 466.9151*np.log(self.temperature) - (52000.87/self.temperature)
def calculate_A_activitypar (self):
'''
Calculates the parameter A of the Debye Hueckel equation
        The units are supposed to be kg^(1/2)/mol^(1/2)
        Actually, if you want it in L/mol, it is possible to divide by the square of the density to obtain such value
        The calculation is based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
'''
A = 1.82483e6*np.sqrt(self.waterdensity)
B = (self.temperature*self.dielectric_constant)**(3/2)
self.A_activitypar = A/B
def calculate_B_activitypar (self):
'''
        Calculates the parameter B of the Debye Hueckel equation
        The units are supposed to be kg^(1/2)/mol^(1/2)*cm
        Actually, if you want it in L/mol, it is possible to divide by the square of the density to obtain such value
        The calculation is based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
        Here the equation is a bit different than the one given in the book. The book takes the equation from
        Theoretical prediction of the thermodynamic behavior of aqueous electrolytes at high pressures and temperatures; II, Debye Huckel parameters for activity coefficients and relative partial molal properties
        The difference is a factor of 10^-8 and is related to the fact that they use angstroms instead of cm
'''
A = 50.29158649e8*np.sqrt(self.waterdensity)
B = np.sqrt(self.temperature*self.dielectric_constant)
self.B_activitypar = A/B
def calculate_waterdensity (self):
'''
Calculates the density of the water
        The calculation is based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
'''
Tc = self.temperature - 273.15
A = (Tc-3.9863)**2
B = Tc + 288.9414
C = Tc + 68.12963
D = (A*B)/(508929.2*C)
E = 0.011445*np.exp(-374.3/Tc)
self.waterdensity = 1 - D + E
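    # Hedged, illustrative example (not part of the original code): at 25 degC the
    # empirical fit above gives approximately 0.997, i.e. the expected density of
    # liquid water in kg/L. Assuming `chem_sys` is an instance of this class:
    #     chem_sys.set_temperature(298.15)
    #     chem_sys.calculate_waterdensity()
    #     chem_sys.waterdensity   # ~0.997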
############################################################################
##### instantiation_step ()
#####
#############################################################################
def instantiation_step (self, type_I=1):
        '''
        Provides an initial guess for the vector of unknowns (concentrations). type_I=1 returns a constant vector of 1e-3.
        '''
if type_I == 1:
c_ini = np.ones(self.S_length_columns)*1e-3
return c_ini
############################################################################################################################################################
################# Speciation and related algorithms ########################################################################################################
############################################################################################################################################################
#
def speciation_Westall1980_CCM (self, tolerance = 1e-6, max_iterations = 100, c_guess = None):
'''
Implementation of the algorithm given in "Chemical Equilibrium Including Adsorption on Charged Surfaces" Westall, 1980
        pages 37-39
'''
# instantiation of unknowns
        if c_guess is None:
c_guess = self.instantiation_step (type_I = 1)
c_n =c_guess
pos_start_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp
pos_end_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
S1, S2 = self.separte_S_into_S1_and_S2()
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
# instantiation variables for loop
counter_iterations = 0
err = tolerance + 1
while err>tolerance and counter_iterations < max_iterations:
# Calculate U vector [If I am not wrong T_sigma must be calculated at every step, since it depends somehow in the surface potential, and it is unknown]
u_electro = self.calculate_u_electro(c_n[pos_start_elec:pos_end_elec], c_n)
T = np.concatenate ((T_chem, u_electro))
# Calculate f or better said in this specific case Y
Y = self.U.dot(c_n) - T
# Calculate Z
Z = self.Jacobian_Speciation_Westall1980(c_n, pos_start_elec, pos_end_elec)
# Calculating the diff, Delta_X
# In the paper Delta_X is X_old - X_new or as they called X_original - X_improved.
# I am writing X_new- X-old, hence I use -Y instead of Y.
delta_X = linalg.solve(Z,-Y)
# The error will be equal to the maximum increment
err = max(abs(delta_X))
            # Relaxation factor borrowed from <NAME> to avoid negative values
max_1 = 1
max_2 =np.amax(-2*np.multiply(delta_X, 1/c_n[0:pos_end_elec]))
Max_f = np.amax([max_1, max_2])
Del_mul = 1/Max_f
# Update
c_n[0:pos_end_elec] = c_n[0:pos_end_elec] + Del_mul*delta_X # Update primary species
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(c_n[0:pos_end_elec]))) # Update secondary
c_n[pos_end_elec:] =10**log_c2
counter_iterations += 1
if counter_iterations >= max_iterations:
raise ValueError('Max number of iterations surpassed.')
self.c = c_n
return c_n
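    # Hedged usage sketch (not from the original author): assuming `chem_sys` is a fully
    # defined ChemSys_Surf object (S, U, the aqueous/sorption component vectors and the
    # physical constants already set), a constant-capacitance speciation could be run as:
    #     c = chem_sys.speciation_Westall1980_CCM(tolerance=1e-8, max_iterations=200)
    # The returned vector concatenates primary aqueous/sorption concentrations, the
    # Boltzmann (surface-potential) factors and the secondary species concentrations,
    # in the column order of self.S.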
def speciation_Westall1980_CCM_v2 (self, tolerance = 1e-6, max_iterations = 100, x = None):
'''
Implementation of the algorithm given in "Chemical Equilibrium Including Adsorption on Charged Surfaces" Westall, 1980
        pages 37-39
'''
# scipy.optimize.newton(func, x0, fprime=None, args=(), tol=1.48e-08, maxiter=50, fprime2=None, x1=None, rtol=0.0, full_output=False, disp=True)[source]
S1, S2 = self.separte_S_into_S1_and_S2()
pos_start_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp
pos_end_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
#c_pri = optimize.newton(self.func_newton, x, args = (T_chem, pos_start_elec, pos_end_elec, S1, S2), fprime = self.Jacobian_Speciation_Westall1980_func)
c_pri = optimize.fsolve(self.func_newton, x, args = (T_chem, pos_start_elec, pos_end_elec, S1, S2), fprime = self.Jacobian_Speciation_Westall1980_func)
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(c_pri))) # Update secondary
c2 =10**log_c2
c_n = np.concatenate ((c_pri, c2))
self.c = c_n
return c_n
def func_newton (self, x, T_chem, pos_start_elec, pos_end_elec, S1, S2):
'''
x is the vector of primary species
'''
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(x))) # Update secondary
c2 =10**log_c2
c_n = np.concatenate ((x, c2))
u_electro = self.calculate_u_electro(x[pos_start_elec:pos_end_elec], c_n)
T = np.concatenate ((T_chem, u_electro))
Y = self.U.dot(c_n) - T
return Y
def Jacobian_Speciation_Westall1980_func (self, x, T_chem, pos_start_elec, pos_end_elec, S1, S2):
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(x))) # Update secondary
c2 =10**log_c2
c_n = np.concatenate ((x, c2))
return self.Jacobian_Speciation_Westall1980(c_n, pos_start_elec, pos_end_elec)
def speciation_Westall1980_v3 (self, tolerance = 1e-6, max_iterations = 100, Ln_x = None, activity_b = False):
'''
Implementation of the algorithm given in "Chemical Equilibrium Including Adsorption on Charged Surfaces" Westall, 1980
        pages 37-39.
        This is the third version; here we work with ln(X) as primary unknowns instead of X. Such a change affects the formulation.
        Specifically, the Newton-Raphson Jacobian of the system should become symmetric (I am not taking activity into account; not sure whether, using activity and its derivatives, the matrix is still symmetric).
        activity_b is just a boolean: if True, the speciation of the secondary species is done by substitution (a fixed-point iteration that includes activity corrections, see speciation_activity_v3); if False, activities are ignored.
'''
# scipy.optimize.newton(func, x0, fprime=None, args=(), tol=1.48e-08, maxiter=50, fprime2=None, x1=None, rtol=0.0, full_output=False, disp=True)[source]
S1, S2 = self.separte_S_into_S1_and_S2()
pos_start_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp
pos_end_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
lnK = self.log_k_vector/np.log10(np.e) # Changing the base from log_10 to ln (log_e)
#c_pri = optimize.newton(self.func_newton, x, args = (T_chem, pos_start_elec, pos_end_elec, S1, S2), fprime = self.Jacobian_Speciation_Westall1980_func)
ln_c_pri = optimize.fsolve(self.residual_fun_v3, Ln_x, args = (lnK, T_chem, pos_start_elec, pos_end_elec, S1, S2, activity_b), fprime = self.Jacobian_Residual_fun_v3)
ln_c2 = np.matmul(linalg.inv(S2), lnK - np.matmul(S1, ln_c_pri))
c1 = np.exp(ln_c_pri)
c2 = np.exp(ln_c2)
c_n = np.concatenate ((c1, c2))
self.c = c_n
return c_n
def residual_fun_v3 (self, x, lnK, T_chem, pos_start_elec, pos_end_elec, S1, S2, activity_b):
'''
        This function is not the 3rd version of an old function; it is related to speciation_Westall1980_v3.
        The main algorithm uses the formulas that can be found in the Westall paper, but for the unknown variables it relies on ln(X) variables instead of just X variables.
        The function that must be built is still Y = U*c - T
        what changes is how the c values are obtained. Before, we assumed that our independent variable was a sort of concentration; now the variable is exactly the ln(X) of that sort of concentration.
        Hence the equation for c is translated into:
c = exp(lnKi+sum(aik*lnX))
but since we are using the stoichiometric matrix the relationship will be
lnC2 = inv(S2)*lnk - inv(S2)*S1*lnX
and c is the concatenation of c = exp(lnX) and exp(lnC2)
'''
if activity_b == False:
c_n = self.speciation_no_activity_v3 (lnK, S1, S2, x)
elif activity_b == True:
c_n = self.speciation_activity_v3 (lnK, S1, S2, x)
c1 = np.exp(x)
u_electro = self.calculate_u_electro(c1[pos_start_elec:pos_end_elec], c_n)
T = np.concatenate ((T_chem, u_electro))
Y = self.U.dot(c_n) - T
return Y
def Jacobian_Residual_fun_v3 (self, x, lnK, T_chem, pos_start_elec, pos_end_elec, S1, S2, activity_b):
'''
        This function is not the 3rd version of an old function; it is related to speciation_Westall1980_v3.
'''
if activity_b == False:
c_n = self.speciation_no_activity_v3 (lnK, S1, S2, x)
elif activity_b == True:
c_n = self.speciation_activity_v3 (lnK, S1, S2, x)
return self.Jacobian_Speciation_Westall1980_modification_lnX (c_n, pos_start_elec, pos_end_elec)
def speciation_no_activity_v3 (self, lnK, S1, S2, x):
ln_c2 = np.matmul(linalg.inv(S2), lnK - np.matmul(S1, x))
c1 = np.exp(x)
c2 = np.exp(ln_c2)
c_n = np.concatenate ((c1, c2))
return c_n
def speciation_activity_v3 (self, lnK, S1, S2, x):
c_1 = np.exp(x)
c_2 = np.zeros(S2.shape[1])
c_2 = self.subfunction_of_speciation_activity_v3 (c_2, c_1, lnK, S1, S2)
c_2 = optimize.fixed_point(self.subfunction_of_speciation_activity_v3, c_2, args = (c_1, lnK, S1, S2))
#
# tolerance = 1e-8
# n_max_iterations = 100
#error = 1
# I need to implement some sort of Picard method
#c_1 = np.exp(x)
#c_2 = np.zeros(S2.shape[1])
# c_k = self.subfunction_of_speciation_activity_v3 (c_2, c_1, lnK, S1, S2)
#counter = 0
#while error > tolerance and counter < n_max_iterations:
# c_k1 = self.subfunction_of_speciation_activity_v3 (c_k, c_1, lnK, S1, S2)
# error = max(abs(c_k1-c_k))
# print(error)
#c_k = c_k1.copy()
#counter += 1
#if counter >= n_max_iterations:
# raise ValueError('Max number of iterations surpassed in speciation_activity_v3 (self, lnK, S1, S2, x.')
c_n = | np.concatenate((c_1, c_2)) | numpy.concatenate |
""" state, observation and action spaces """
from collections import namedtuple, OrderedDict
from io import BytesIO
from itertools import product
from os.path import join
import pkg_resources
import numpy as np
import pandas as pd
import energypy as ep
from energypy.common.spaces import DiscreteSpace, ContinuousSpace
# used in envs
PrimitiveConfig = namedtuple(
'primitive', ['name', 'low', 'high', 'type', 'data']
)
primitive_register = {
'discrete': DiscreteSpace,
'continuous': ContinuousSpace
}
class Space(OrderedDict):
def __init__(self, name):
super().__init__()
self.name = name
self._shape = None
def __repr__(self):
return('<{} space {}>'.format(self.name, self.shape))
@property
def shape(self):
return self._shape
@shape.getter
def shape(self):
return (len(self.keys()), )
@property
def low(self):
return self._low
@low.getter
def low(self):
return np.array([spc.low for spc in self.values()]).reshape(*self.shape)
@property
def high(self):
return self._high
@high.getter
def high(self):
return np.array([spc.high for spc in self.values()]).reshape(*self.shape)
def sample(self):
return np.array([spc.sample() for spc in self.values()]).reshape(1, *self.shape)
def contains(self, x):
return all(spc.contains(part) for (spc, part) in zip(self.values(), x[0]))
def from_primitives(self, *primitives):
for p in primitives:
self[p.name] = primitive_register[p.type](p.name, p.low, p.high, data=p.data)
self.num_samples = self.set_num_samples()
return self
def append(self, primitive):
p = primitive
self[p.name] = primitive_register[p.type](p.name, p.low, p.high, data=p.data)
self.num_samples = self.set_num_samples()
return self
def set_num_samples(self):
num_samples = []
for name, space in self.items():
if isinstance(space.data, str):
assert space.data == 'append'
else:
num_samples.append(np.array(space.data).shape[0])
if num_samples:
assert max(num_samples) == min(num_samples)
return max(num_samples)
else:
return None
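    # Hedged usage sketch (not part of the original module): the primitive configs and
    # data below are made up for illustration; 'price' carries a data array while
    # 'charge' is filled at call time via the 'append' mechanism.
    #     prices = np.random.uniform(0, 100, 48)
    #     space = Space('demo').from_primitives(
    #         PrimitiveConfig('price', 0, 100, 'continuous', prices),
    #         PrimitiveConfig('charge', 0, 1, 'continuous', 'append'),
    #     )
    #     space.shape        # -> (2,)
    #     space.num_samples  # -> 48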
class StateSpace(Space):
def __init__(self, name='state'):
super().__init__(name=name)
def __call__(self, steps, offset, append=None):
"""
steps = num steps through episode
        offset = offset for the start of the episode
append = {name: data}, data from env appended to state / obs
"""
data = []
for name, space in self.items():
if space.data == 'append':
# if isinstance(space.data, str):
assert space.data == 'append'
data.append(append[name])
elif space.data is not None:
data.append(space(steps, offset))
else:
raise ValueError
return | np.array(data) | numpy.array |
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by <NAME> (2018)
###############################################################################
"Function" submodule
This sub-package contains some functions required for the "Hamiltonian"
sub-package.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
from scipy import constants
from MajoranaNanowires.third_functions import pfaffian as pf
#%% ############################# Functions
#%%
def FermiDirac(E,kT,mu=0):
"""
Computes the Fermi-Dirac distribution.
Parameters
----------
E: scalar or arr
Energies.
kT: scalar
Temperature (in units of energy).
mu: scalar or arr
Fermi energy.
Returns
-------
result: scalar or arr
Fermi-Dirac distribution for the given energies.
"""
np.seterr(over='ignore')
np.seterr(divide='ignore')
return (1/(1+np.exp((E-mu)/kT)))
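# Hedged, illustrative example (not part of the original module): occupation numbers for
# three energies at kT = 25 (same energy units as E) and mu = 0.
#     FermiDirac(np.array([-50.0, 0.0, 50.0]), 25.0)
#     # -> approximately array([0.881, 0.5, 0.119])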
#%%
def density_TF(phi,kT=0,E_F=0,material='InAs',band='conduction',Vz=0):
"""
Computes the charge density of a 3D (free) electron gas in the Thomas-Fermi
approximation.
Parameters
----------
phi: scalar or arr
Electrostatic energy.
kT: scalar
Temperature (in units of energy).
E_F: scalar or arr
Fermi energy.
    material: str or dict
        Material for which it is evaluated. For a general material,
'material' is a dictionary with arguments m_eff (conduction
effective mass), m_eff_hh (heavy hole effective mass), m_eff_lh
(light hole effective mass), and E_gap (semiconductor gap). These
parameters are already saved in this function for InAs and InSb,
which can be chosen by choosing material='InAs' or 'InSb',
        respectively.
band: str
Whether to include 'conduction', 'valence' or 'both' bands in the
calculations.
Vz: scalar
Zeeman splitting.
Returns
-------
den: scalar or arr
Charge density in the Thomas-Fermi approximation for the given
electrostatic energies.
"""
np.seterr(invalid='ignore')
if material=='InAs':
m_eff=0.023
m_eff_hh=0.41
m_eff_lh=0.026
E_gap=418
elif material=='InSb':
m_eff=0.015
m_eff_hh=0.43
m_eff_lh=0.015
E_gap=170
else:
        if 'E_gap' in material:
            # read the effective masses and the gap from the user-provided dictionary
            m_eff, m_eff_hh, m_eff_lh, E_gap = material['m_eff'], material['m_eff_hh'], material['m_eff_lh'], material['E_gap']
        else:
            m_eff = material['m_eff']
if band=='conduction':
if Vz==0:
den_e=-1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F)*1e-3*constants.e*FermiDirac(-phi-E_F,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_e,0)
else:
den_e=-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F+Vz)*1e-3*constants.e*FermiDirac(-phi-E_F-Vz,kT))/constants.hbar)**3*1e-27
den_e=den_e-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F-Vz)*1e-3*constants.e*FermiDirac(-phi-E_F+Vz,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_e,0)
elif band=='valence':
if Vz==0:
den_hh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_hh+den_lh,0)
else:
den_hh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_hh=den_hh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_lh=den_lh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_hh+den_lh,0)
elif band=='both':
if Vz==0:
den_e=-1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F)*1e-3*constants.e*FermiDirac(-phi-E_F,kT))/constants.hbar)**3*1e-27
den_e=np.nan_to_num(den_e,0)
den_hh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_h=np.nan_to_num(den_hh+den_lh,0)
den=den_e+den_h
else:
den_e=-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F+Vz)*1e-3*constants.e*FermiDirac(-phi-E_F-Vz,kT))/constants.hbar)**3*1e-27
den_e=den_e-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F-Vz)*1e-3*constants.e*FermiDirac(-phi-E_F+Vz,kT))/constants.hbar)**3*1e-27
den_e=np.nan_to_num(den_e,0)
den_hh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_hh=den_hh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_lh=den_lh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_h=np.nan_to_num(den_hh+den_lh,0)
den=den_e+den_h
return (den)
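# Hedged, illustrative example (not part of the original module): Thomas-Fermi electron
# density in InAs for an electrostatic energy of 100 meV that pulls the conduction band
# below the Fermi level, at a small but finite temperature (kT in meV):
#     density_TF(100.0, kT=1.0, E_F=0, material='InAs', band='conduction')
# The result is negative (electron charge density, in units of e per nm^3).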
#%% ############################# Array manipulation
#%%
def order_eig(E,U=0,sparse='yes',BdG='yes'):
"""
Order the eigenfunctions from smaller to larger. If BdG==yes and
sparse==yes, it also ensures that there are the same number of positive
eigenvalues than negative.
Parameters
----------
E: arr
Eigenvalues.
U: arr
Eigenvectors.
sparse: {'yes','no'}
Whether the eigenspectrum has been computed from a sparse matrix.
BdG: {'yes','no'}
Whether the eigenspectrum must have BdG symmetry or not.
Returns
-------
E, U: arrs
Eigenspectrum ordered from smaller to larger eigenvalues.
"""
n_eig=len(E)
if np.isscalar(U):
if BdG=='yes':
if sparse=='yes':
idx = np.argsort(E)
E = E[idx]
if (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==1):
E[n_eig-1]=-E[n_eig-2]
elif (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==-1):
E[0]=-E[1]
idx = np.argsort(E)
return (idx)
else:
if BdG=='yes':
if sparse=='yes':
idx = np.argsort(E)
E = E[idx]
U = U[:,idx]
if (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==1):
E[n_eig-1]=-E[n_eig-2]
elif (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==-1):
E[0]=-E[1]
idx = np.argsort(E)
E = E[idx]
U = U[:,idx]
return (E),(U)
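# Hedged, illustrative example (not part of the original module): ordering a small
# particle-hole symmetric spectrum obtained with a dense solver (sparse='no').
#     E = np.array([1.0, -1.0, 0.5, -0.5])
#     U = np.eye(4)
#     E_sorted, U_sorted = order_eig(E, U, sparse='no', BdG='yes')
#     # E_sorted -> array([-1. , -0.5,  0.5,  1. ]), with the columns of U permuted accordingly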
#%%
def length(vec):
"""
    Length of a given vector. If vec is a scalar, its length is 1.
Parameters
----------
vec: scalar or arr
Input vector
Returns
-------
length: int
        Length of vec. If vec is a scalar, its length is 1.
"""
if np.ndim(vec)==0:
length=1
else:
length=len(vec)
return length
#%%
def diagonal(N,k=0,init=0,step=1):
"""
    Indices of a given diagonal of a matrix. It is more efficient than its
numpy counterpart.
Parameters
----------
N: int
Length of the diagonal (number of elements).
k: int
Offset of the off-diagonal. k=0 is the main diagonal, k>0 is a
diagonal in the upper-part of the Hamiltonian, and k<0 in the
lower one.
init: int
The starting element of the diagonal.
step: int
The step between elements in the diagonal.
Returns
-------
indices: tuple of arr
Indices of the diagonal. The first element of the tuple are the
row elements, and the second one are the column ones.
"""
assert np.isscalar(k), 'The offset k must be a scalar'
if k==0:
indices=(np.arange(init,N,step=step),np.arange(init,N,step=step))
elif k>0:
indices=(np.arange(init,N-k,step=step),np.arange(init,N-k,step=step)+k)
elif k<0:
indices=(np.arange(init,N+k,step=step)-k,np.arange(init,N+k,step=step))
return(indices)
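# Hedged, illustrative example (not part of the original module): indices of the first
# upper off-diagonal of a 4x4 matrix.
#     diagonal(4, k=1)
#     # -> (array([0, 1, 2]), array([1, 2, 3]))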
#%%
def concatenate(arg):
"""
Concatenate a list of arrays.
Parameters
----------
arg: tuple or list of arr
List of arrays to be concatenated.
Returns
-------
con: arr or list
Array or list of the concatenated list.
"""
if isinstance(arg[0],tuple) and len(arg[0])==2:
index_1, index_2 = np.array([]), np.array([])
for i in range(len(arg)):
index_1 = np.append(index_1,arg[i][0])
index_2 = np.append(index_2,arg[i][1])
indices=(index_1,index_2)
else:
indices=np.concatenate(arg)
return(indices)
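# Hedged, illustrative example (not part of the original module): merging the index tuples
# produced by diagonal(), e.g. to build a sparse Hamiltonian pattern in one go.
#     concatenate((diagonal(3), diagonal(3, k=1)))
#     # -> (array([0., 1., 2., 0., 1.]), array([0., 1., 2., 1., 2.]))  (returned as float arrays)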
#%%
def between(arg, interval):
"""
    Computes whether a given number lies within a given interval or not.
Parameters
----------
arg: scalar
Number to be evaluated.
interval: tuple
        Interval in which to perform the evaluation.
Returns
-------
result: bool
        If arg is within the interval, result=True; otherwise, result=False.
"""
if arg>=interval[0] and arg<=interval[1]:
result=True
else:
result=False
return(result)
#%%
def arg_isclose(vec,val):
"""
Find the index of a given vector that corresponds to the element of the
array "vec" which is closest to to an specific value "val".
Parameters
----------
vec: arr
Array in which it is desired to find the closest element.
val: scalar
Closest value.
Returns
-------
result: int
Index of the element of vec closest to val.
"""
arg=np.argmin(np.abs(vec-val))
return(arg)
#%% ############################# Constructors or extractors
#%%
def build_mesh(N,L,mesh_type='regular',fact=0.5,asym=1):
"""
Build a 2D inhomogeneous rectangular mesh.
Parameters
----------
N: arr
Number of sites in each direction.
L: arr
Length en each direction.
mesh_type: str
Whether to build a 'regular' mesh, or an inhomogeneous one with a
discretization given by a 'geometric' distribution, an 'exponential'
separation, or a 'random' one.
fact: scalar
Factor which regulates the separations between sites.
asym: scalar
The asymmetry between the factors applied for the x and y direction.
Returns
-------
x, y: mesh
Mesh in the x and y directions.
dis: mesh
Mesh with the discretization in each point.
"""
if mesh_type=='regular':
x, y = np.linspace(-L[1]/2,L[1]/2,N[0]), np.linspace(-L[0]/2,L[0]/2,N[1])
dis=np.array([np.abs(x[1]-x[0]),np.abs(y[1]-y[0])])
x,y=np.meshgrid(x,y,indexing='ij')
return (x,y,dis)
elif mesh_type=='geometric':
xm,ym=np.zeros(N), np.zeros(N)
dis_m=np.array([np.zeros(N),np.zeros(N)])
for i in range(N[0]):
for j in range(N[1]):
xm[i,j]=(L[0]/2*fact**np.abs(i-int((N[0]-1)/2))-L[0]/2)*np.sign(i-int((N[0]-1)/2))*(L[0]/(L[0]/2*fact**np.abs(0-int((N[0]-1)/2))-L[0]/2)/2)
ym[i,j]=(L[1]/2*fact**np.abs(j-int((N[1]-1)/2))-L[1]/2)*np.sign(j-int((N[1]-1)/2))*(L[1]/(L[1]/2*fact**np.abs(0-int((N[1]-1)/2))-L[1]/2)/2)
for i in range(N[0]):
for j in range(N[1]):
if not(j==0 or j==N[1]-1):
dis_m[1,i,j]=np.abs(ym[i,j+1]-ym[i,j])/2+np.abs(ym[i,j-1]-ym[i,j])/2
if not(i==0 or i==N[0]-1):
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])/2+np.abs(xm[i,j]-xm[i+1,j])/2
if i==0:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i+1,j])
elif i==N[0]-1:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])
if j==0:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j+1])
elif j==N[1]-1:
dis_m[1,i,j]= | np.abs(ym[i,j]-ym[i,j-1]) | numpy.abs |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
            # track elapsed time before checking the status so it is always defined when reported
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
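# Hedged usage sketch (not part of the original module): the reference designator below is
# taken from the M2M_URLs table further down, and the date strings are hypothetical
# examples of the expected ISO-8601 format. Requires valid credentials in AUTH.
#     data = M2M_Call('CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument',
#                     '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#     files = M2M_Files(data, tag='.*DOSTA.*\\.nc$')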
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The collected file references can then be opened (e.g. with M2M_Data) for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF files in the THREDDS catalog matching the regex tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
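# Hedged, illustrative example (not part of the original module): structtype grows on
# indexed access, mimicking a Matlab struct array of `var` records.
#     s = structtype()
#     s[0].name = 'time'
#     s[0].units = 'seconds since 1900-01-01'
#     len(s)   # -> 1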
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
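# FDCHP (direct covariance flux package): only the time axis is requested from the
# fdchp_a_dcl_instrument telemetered stream below.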
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
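# DOSTA (dissolved oxygen optode): ISSM NSIF/MFN branches use the combined ctdbp_dcl stream
# (time, dissolved_oxygen, optode oxygen), while SHSM/OSSM NSIF branches use the standalone dcl
# stream that adds optode temperature and TC oxygen. The CE09OSPM profiler request maps to the
# dofst_k stream, whose raw oxygen output is a sensor frequency reported in Hz.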
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
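# ADCP (acoustic Doppler current profiler): earth-referenced eastward/northward/upward velocities
# per depth bin, plus bin depths (m) and instrument attitude (heading/pitch/roll in deci-degrees).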
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
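# ZPLSC (bio-acoustic sonar): only the time axis is requested from the zplsc_c_instrument streams,
# for both telemetered and recovered_host methods.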
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
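# WAVSS (surface wave statistics): bulk wave parameters from the buoy-mounted wave sensor --
# wave heights (m), periods (sec), zero-crossing counts, mean direction and spread (degrees).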
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
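# VELPT (single-point velocity meter): velocity components (m/s) plus raw engineering fields;
# as the variable names indicate, heading/roll/pitch are in deci-degrees, temperature in
# 0.01 degC and pressure in 0.001 dbar.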
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
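# PCO2W (seawater pCO2): thermistor temperature (degC) and pCO2 of seawater (uatm).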
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
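# PHSEN (seawater pH): thermistor temperature (degC) and pH of seawater (unitless).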
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
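# SPKIR (spectral irradiance): downwelling irradiance vector (per-wavelength values)
# in uW cm-2 nm-1.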
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
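# PRESF (seafloor pressure): absolute seafloor pressure (dbar) and seawater temperature (degC)
# from the MFN presf tide-measurement streams.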
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
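# CTDBP (pumped CTD): instrument_class 'CTD' maps to the ctdbp_cdef_dcl streams with temperature,
# practical salinity, density, pressure and conductivity.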
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
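# Note: the CE09OSSM MFN CTD above uses the 03-CTDBPE000 sensor designator, unlike the
# 03-CTDBPC000 designator used on the other telemetered CTD streams in this block.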
#VEL3D: 3-D single-point turbulent velocity meters (seafloor MFN nodes)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK: VEL3D-K point velocity meter on the coastal wire-following profiler
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
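# Unlike the fixed VEL3D streams above, the profiler VEL3D-K also reports heading/pitch/roll and
# takes pressure from the co-located CTD ('int_ctd_pressure', in dbar) rather than 'seawater_pressure_mbar'.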
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
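# The wire-following profiler CTD uses ctdpf_ckl_* parameter names rather than the ctdbp
# names used by the moored CTDs above.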
#PCO2A: air-sea pCO2 instruments (surface buoys)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
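# All four PCO2A requests use the water-side stream ('pco2a_a_dcl_instrument_water'), which carries
# seawater and atmospheric pCO2 along with the air-sea CO2 flux product.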
#PARAD: photosynthetically available radiation sensor (profiler)
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA: optical absorption and attenuation spectrophotometers
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
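# The OPTAA requests above retrieve only the time coordinate; note the CE09OSSM MFN stream uses
# the 01-OPTAAC000 designator while the other moorings use 01-OPTAAD000.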
#NUTNR: nitrate sensors (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
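# NUTNR nitrate data come from the SUNA; the telemetered stream is named 'suna_dcl_recovered'
# and provides both raw and salinity-corrected nitrate in umol/L.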
#MOPAK: buoy 3-axis motion packages (accelerometer streams)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
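# The MOPAK accelerometer streams above are requested with the time coordinate only.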
#METBK: bulk meteorology packages (surface buoys)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
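# The METBK requests above include the averaged wind components 'met_windavg_mag_corr_east/north'
# (per the parameter names, magnetic-declination-corrected) and the per-minute bulk flux estimates
# (met_heatflx_minute, met_latnflx_minute, met_netlirr_minute, met_sensflx_minute).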
#FLORT: three-wavelength fluorometers (chlorophyll, CDOM, optical backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
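# Every FLORT request above uses the same five optical parameters (scattering coefficient,
# chlorophyll-a, CDOM, total volume scattering, optical backscatter) plus time; only the
# platform/node path changes.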
#FDCHP: direct-covariance flux package (CE02SHSM buoy only)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA: dissolved oxygen optodes
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
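# NSIF DOSTA streams above include optode temperature and the estimated/TC oxygen fields, whereas
# the MFN streams report 'dosta_ln_optode_oxygen' alongside the corrected 'dissolved_oxygen'.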
#ADCP: acoustic Doppler current profilers
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
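# ADCP series designators differ by site (ADCPTA000/ADCPTC000 on the NSIFs; ADCPTM000, ADCPTC000,
# and ADCPSJ000 on the MFNs), but all use the 'adcp_velocity_earth' stream with the same variable set.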
#WAVSS: surface wave statistics (wave spectra instruments on surface buoys)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
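# Single-point velocity meter: east/north/up velocity components plus heading, pitch, roll, temperature and pressure.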
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
# Note: unlike the other VELPT host-recovered branches, this one requests the plain instrument_recovered stream rather than the dcl-parsed stream.
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
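# Water-side pCO2 sensor: thermistor temperature and pCO2 of seawater (uatm).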
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
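# Seawater pH sensor: thermistor temperature and pH of seawater.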
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
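# Spectral irradiance: downwelling irradiance vector (uW cm-2 nm-1).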
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
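# Seafloor pressure (MFN nodes only): absolute seafloor pressure and seawater temperature.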
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
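# Pumped CTD, host-recovered streams: temperature, practical salinity, density, pressure and conductivity.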
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
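# 3-D point velocimeter (MFN nodes): east/north/up turbulent velocities and seawater pressure.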
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
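# Air-sea pCO2: surface-seawater pCO2, atmospheric pCO2 and the derived CO2 flux.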
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
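# Optical absorption/attenuation meter: only the time coordinate is requested for these streams.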
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
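# Nitrate sensor (SUNA): raw and salinity-corrected nitrate concentrations.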
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
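#CTDBP - RecoveredInst
# Instrument-recovered pumped CTD streams; same physical quantities as the host-recovered CTDBP blocks, with instrument-specific parameter names.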
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
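#Recovered-instrument ADCP earth-coordinate velocity streams (NSIF and MFN nodes)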
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
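#Recovered-instrument bio-acoustic sonar (ZPLSC) echogram streams on the MFNs; only the time base is requested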
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
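#Recovered-instrument single-point velocity meter (VELPT) streams on the buoy and NSIF nodes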
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
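#Recovered 3-D point velocity meter (VEL3D) streams: wire-following profiler and seafloor (MFN) variants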
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
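#Recovered-instrument seafloor pressure (PRESF) tide records on the MFNs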
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
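#Recovered-instrument seawater pH (PHSEN) streams on the NSIF and MFN nodes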
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
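#Recovered-instrument pCO2 (PCO2W) streams on the NSIF and MFN nodes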
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
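#Recovered wire-following profiler PAR (PARAD) stream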
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
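#Recovered-instrument nitrate (NUTNR/SUNA) streams on the NSIF nodes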
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
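#Recovered-instrument direct covariance flux (FDCHP) stream; only the time base is requested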
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
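#Recovered fluorometer (FLORT) streams on the ISSM buoys and the CE09OSPM profiler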
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
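#Recovered dissolved oxygen streams: DOFST on the profiler, DOSTA (via the co-located CTDBP) on the NSIF and MFN nodes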
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
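#Wave statistics (WAVSS_Stats) derived from the recovered ADCPT on the CE01ISSM and CE06ISSM MFNs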
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
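#Cabled benthic experiment package (BEP) streamed datasets for CE02SHBP and CE04OSBP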
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
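#The streamed OPTAA branches below map only the time variable into var_list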
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
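#CE02SHSP and CE07SHSP CSPP data below (recovered CSPP deployments)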
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
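#Coastal glider (CE05MOAS) data below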
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
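# --- DOSTA (dissolved oxygen optode) glider streams, CE05MOAS array ---
# The branches below follow the same pattern as the CTD branches above: each
# platform/method pair is mapped to its uFrame stream name, and var_list is
# seeded with parameter names, empty arrays (presumably filled in once the
# data request returns), and units.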
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
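# --- FLORT (ECO triplet fluorometer) glider streams, CE05MOAS array ---
# Parameters cover scattering, chlorophyll, CDOM, and optical backscatter,
# plus the interpolated CTD pressure and the glider position.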
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
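# --- PARAD (photosynthetically active radiation) glider streams, CE05MOAS array ---
# Each branch records the PAR measurement plus interpolated CTD pressure and position.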
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
        var_list[0].data = np.array([])
"""
Test the quality of inference, measured by num edges + num mutations, and (if
using simulated data) the KC distance
"""
import os.path
import argparse
import collections
import itertools
import multiprocessing
import re
import time
import logging
import json
import msprime
import tskit
import numpy as np
import stdpopsim
import tsinfer
from error_generation import add_errors
import intervals
logging.basicConfig()
logger = logging.getLogger(__name__)
def make_switch_errors(sample_data, switch_error_rate=0, random_seed=None, **kwargs):
raise NotImplementedError
def rnd_kc(params):
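    """
    Return the span-weighted average KC distance between each local tree in `ts`
    and a star tree whose polytomies are randomly split (seeded per tree so the
    result is reproducible). Used to estimate `kc_max_split` for a simulation.
    """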
ts, random_seed = params
s = tskit.Tree.generate_star(
ts.num_samples, span=ts.sequence_length, sample_lists=True)
kc = 0
for tree in ts.trees(sample_lists = True):
kc += tree.span * tree.kc_distance(s.split_polytomies(
random_seed=random_seed + tree.index, sample_lists=True))
return kc / ts.sequence_length
def simulate_stdpopsim(
species,
model,
contig,
num_samples,
mutation_file=None,
seed=123,
skip_existing=False,
num_procs=1,
):
base_fn = f"{model}_{contig}_n{num_samples}"
tree_fn = f"{base_fn}_seed{seed}"
logger.info(f"Using {species}:{contig} from stdpopsim using the {model} model")
if skip_existing and os.path.exists(tree_fn + ".trees"):
logger.info(
f"Simulation file {tree_fn}.trees already exists, returning that.")
return base_fn, tree_fn
sample_data = None
species = stdpopsim.get_species(species)
model = species.get_demographic_model(model)
num_pops = model.num_sampling_populations
if num_samples < num_pops or num_samples % num_pops != 0:
raise ValueError(
f"num_samples must be an integer multiple of {num_pops} "
f"(or 2 x {num_pops} if diploid sequencing error is injected)"
)
pop_n = num_samples // num_pops
logger.info(
f"Simulating {num_pops}x{pop_n} samples, seed {seed}, file prefix '{tree_fn}'."
)
contig = species.get_contig(contig)
l = contig.recombination_map.get_sequence_length()
if mutation_file is not None:
logger.debug(f"Loading {mutation_file}")
sample_data = tsinfer.load(mutation_file)
if sample_data.sequence_length != l:
raise ValueError(
f"Mismatching sequence_length between simulation and {mutation_file}")
# Reduce mutation rate to 0, as we will insert mutations later
contig = stdpopsim.Contig(
mutation_rate=0,
recombination_map=contig.recombination_map,
genetic_map=contig.genetic_map,
)
r_map = contig.recombination_map
assert len(r_map.get_rates()) == 2 # Ensure a single rate over chr
samples = model.get_samples(*([pop_n] * num_pops))
engine = stdpopsim.get_engine('msprime')
ts = engine.simulate(model, contig, samples, seed=seed)
tables = ts.dump_tables()
if sample_data is not None:
pos = sample_data.sites_position[:]
logger.info(
f"Inserting {len(pos)} mutations at variable sites from {mutation_file}")
for tree in ts.trees():
positions = pos[np.logical_and(pos>=tree.interval[0], pos<tree.interval[1])]
if len(positions) == 0:
continue
muts = list(zip(
np.random.uniform(0, tree.total_branch_length, size=len(positions)),
positions))
muts.sort()
tot = 0
# place a mutation on a random branch, proportional to branch length
try:
for n in tree.nodes():
tot += tree.branch_length(n)
while muts[0][0] < tot:
_, position = muts.pop(0)
s = tables.sites.add_row(position=position, ancestral_state="0")
tables.mutations.add_row(node=n, site=s, derived_state="1")
except IndexError:
# No more mutations - go to next tree
continue
tables.sort()
logger.debug(
f"Inserted mutations at density {ts.num_mutations/ts.sequence_length}")
interval = [int(l * 2/20), int(l * 2/20)+1e7] # 10Mb near the start, not centromeric
tables.keep_intervals([interval])
tables.trim()
logger.debug(
f"Cut down tree seq to {interval} ({tables.sites.num_rows} sites) for speed")
# Add info to the top-level metadata
user_data = {}
logger.info("Calculating the kc distance of the simulation against a flat tree")
star_tree = tskit.Tree.generate_star(
ts.num_samples, span=tables.sequence_length, record_provenance=False)
user_data['kc_max'] = tables.tree_sequence().kc_distance(star_tree.tree_sequence)
kc_array = []
max_reps = 100
ts = tables.tree_sequence()
logger.info(
f"Calculating KC distance of the sim against at most {max_reps} * {ts.num_trees}"
f" random trees using {num_procs} parallel threads. This could take a while."
)
seeds = range(seed, seed + max_reps)
with multiprocessing.Pool(num_procs) as pool:
for i, kc in enumerate(pool.imap_unordered(
rnd_kc, zip(itertools.repeat(ts), seeds))
):
kc_array.append(kc)
if i > 10:
se_mean = np.std(kc_array, ddof=1)/np.sqrt(i)
                # break if SEM < 1/100th of mean KC. This can take a long time.
if se_mean/np.average(kc_array) < 0.01:
logger.info(
f"Stopped after {i} replicates as kc_max_split deemed accurate.")
break
user_data['kc_max_split'] = np.average(kc_array)
if tables.metadata_schema != tskit.MetadataSchema({"codec":"json"}):
if tables.metadata:
raise RuntimeError("Metadata already exists, and is not JSON")
tables.metadata_schema = tskit.MetadataSchema({"codec":"json"})
tables.metadata = {}
tables.metadata = { **tables.metadata, "user_data": user_data}
tables.tree_sequence().dump(tree_fn + ".trees")
return base_fn, tree_fn
def test_sim(seed):
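    """Run a small, fast msprime simulation (10 samples, 1 kb) for quick testing."""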
ts = msprime.simulate(
10,
length=1000,
mutation_rate=1e-2,
recombination_rate=1e-2,
random_seed=seed)
return ts, f"test_sim{seed}"
def setup_sampledata_from_simulation(
prefix, random_seed, err=0, num_threads=1,
cheat_breakpoints=False, use_sites_time=False, skip_existing=False):
"""
Take the results of a simulation and return a sample data file, some reconstructed
ancestors, a recombination rate array, a suffix to append to the file prefix, and
the original tree sequence.
If 'err' is 0, we do not inject any errors into the haplotypes. Otherwise
we add empirical sequencing error and ancestral allele polarity error
    If "cheat_breakpoints" is True, multiply the recombination rate at the known
    recombination breakpoints from the simulation by 20
    If "use_sites_time" is True, use the known site times from the simulation
If "skip_existing" is True, and the sample_data file and ancestors_file that were
going to be generated already exist, then skip the actual simulation and just return
those files and their data.
"""
suffix = ""
ts = tskit.load(prefix + ".trees")
plain_samples = tsinfer.SampleData.from_tree_sequence(
ts, use_sites_time=use_sites_time)
if cheat_breakpoints:
suffix += "cheat_breakpoints"
logger.info("Cheating by using known breakpoints")
if use_sites_time:
suffix += "use_times"
logger.info("Cheating by using known times")
if err == 0:
sd_path = prefix + suffix + ".samples"
if skip_existing and os.path.exists(sd_path):
logger.info(f"Simulation file {sd_path} already exists, loading that.")
sd = tsinfer.load(sd_path)
else:
sd = plain_samples.copy(path=sd_path) # Save the samples file
sd.finalise()
else:
logger.info("Adding error")
suffix += f"_ae{err}"
sd_path = prefix + suffix + ".samples"
if skip_existing and os.path.exists(sd_path):
logger.info(f"Sample file {sd_path} already exists, loading that.")
sd = tsinfer.load(sd_path)
else:
error_file = add_errors(
plain_samples,
err,
random_seed=random_seed)
sd = error_file.copy(path=prefix + suffix + ".samples")
if use_sites_time:
# Sites that were originally singletons have time 0, but could have been
# converted to inference sites when adding error. Give these a nonzero time
sites_time = sd.sites_time
sites_time[sites_time == 0] = np.min(sites_time[sites_time > 0])/1000.0
sd.sites_time[:] = sites_time
sd.finalise()
for attribute in ('sequence_length', 'num_samples', 'num_sites'):
if getattr(sd, attribute) != getattr(ts, attribute):
raise ValueError(
f"{attribute} differs between original ts and sample_data: "
f"{getattr(sd, attribute)} vs {getattr(ts, attribute)}")
anc_path = prefix + suffix + ".ancestors"
if skip_existing and os.path.exists(anc_path):
logger.info(f"Ancestors file {anc_path} already exists, loading that.")
anc = tsinfer.load(anc_path)
else:
anc = tsinfer.generate_ancestors(
sd,
num_threads=num_threads,
path=anc_path,
)
logger.info("GA done")
inference_pos = anc.sites_position[:]
    rho = 1e-8  # shouldn't matter what this is - it is relative to the mismatch ratio
if cheat_breakpoints:
raise NotImplementedError("Need to make a RateMap with higher r at breakpoints")
breakpoint_positions = np.array(list(ts.breakpoints()))
inference_positions = anc.sites_position[:]
breakpoints = np.searchsorted(inference_positions, breakpoint_positions)
# Any after the last inference position must be junked
# (those before the first inference position make no difference)
breakpoints = breakpoints[breakpoints != len(rho)]
rho[breakpoints] *= 20
return sd.path, anc.path, rho, suffix, ts
def setup_sample_file(base_filename, args, num_threads=1):
"""
Return a sample data file, the ancestors file, a corresponding recombination rate
(a single number or a RateMap), a prefix to use for files, and None
"""
gmap = args.genetic_map
sd = tsinfer.load(base_filename + ".samples")
anc = tsinfer.generate_ancestors(
sd,
num_threads=num_threads,
path=base_filename + ".ancestors",
)
logger.info("GA done")
inference_pos = anc.sites_position[:]
match = re.search(r'(chr\d+)', base_filename)
if match or gmap is not None:
if gmap is not None:
try:
rho=float(gmap)
logger.info(f"Using rate {gmap} for the recombination rate")
except ValueError:
rho = intervals.read_hapmap(gmap)
logger.info(f"Using file from {gmap} for the recombination map")
else:
chr = match.group(1)
logger.info(f"Using {chr} from HapMapII_GRCh37 for the recombination map")
gmap = stdpopsim.get_species("HomSap").get_genetic_map(id="HapMapII_GRCh37")
if not gmap.is_cached():
gmap.download()
filename = os.path.join(gmap.map_cache_dir, gmap.file_pattern.format(id=chr))
rho = intervals.read_hapmap(filename)
else:
        rho = 1e-8  # shouldn't matter what this is - it is relative to the mismatch ratio
#if np.any(d==0):
# w = np.where(d==0)
# raise ValueError("Zero recombination rates at", w, inference_pos[w])
return sd.path, anc.path, rho, "", None
# Parameters passed to each subprocess
Params = collections.namedtuple(
"Params",
"ts_file, sample_file, anc_file, rec_rate, ma_mis_ratio, ms_mis_ratio, precision, "
"num_threads, kc_max, kc_max_split, seed, error, source, skip_existing"
)
Results = collections.namedtuple(
"Results",
"arity_mean, arity_var, edges, error, kc_max_split, kc_max, kc_poly, kc_split, "
"muts, n, num_sites, min_num_muts, revised_num_muts, num_trees, precision, "
"proc_time, ma_mis_ratio, ms_mis_ratio, seed, sim_ts_min_bytes, sim_ts_bytes, "
"source, ts_bytes, ts_path"
)
def run(params):
"""
Run a single inference, with the specified rates
"""
precision = params.precision
logger.info(
f"Starting {params.ma_mis_ratio} {params.ms_mis_ratio}. Precision {precision}"
)
prefix = None
assert params.sample_file.endswith(".samples")
assert params.anc_file.endswith(".ancestors")
samples = tsinfer.load(params.sample_file)
ancestors = tsinfer.load(params.anc_file)
start_time = time.process_time()
prefix = params.sample_file[0:-len(".samples")]
inf_prefix = "{}_rma{:g}_rms{:g}_p{}".format(
prefix,
params.ma_mis_ratio,
params.ms_mis_ratio,
precision)
ats_path = inf_prefix + ".atrees"
if params.skip_existing and os.path.exists(ats_path):
logger.info(f"Ancestors ts file {ats_path} already exists, loading that.")
inferred_anc_ts = tskit.load(ats_path)
prov = json.loads(inferred_anc_ts.provenances()[-1].record.encode())
if ancestors.uuid != prov['parameters']['source']['uuid']:
logger.warning(
"The loaded ancestors ts does not match the ancestors file. "
"Checking the site positions, and will abort if they don't match!")
# We might be re-running this, but the simulation file is the same
# So double-check that the positions in the ancestors ts are a subset of
# those in the used sample data file
assert np.all(np.isin(
inferred_anc_ts.tables.sites.position,
samples.sites_position[:]))
else:
logger.info(f"MA running: will save to {ats_path}")
inferred_anc_ts = tsinfer.match_ancestors(
samples,
ancestors,
num_threads=params.num_threads,
precision=precision,
recombination_rate=params.rec_rate,
mismatch_ratio=params.ma_mis_ratio,
)
inferred_anc_ts.dump(ats_path)
logger.info(f"MA done: mismatch ratio = {params.ma_mis_ratio}")
ts_path = inf_prefix + ".trees"
if params.skip_existing and os.path.exists(ts_path):
logger.info(f"Inferred ts file {ts_path} already exists, loading that.")
inferred_ts = tskit.load(ts_path)
try:
user_data = inferred_ts.metadata['user_data']
process_time = user_data.get("proc_time", None)
# Check we have all the required user data
if set(inferred_ts.metadata['user_data'].keys()) != set(Results._fields):
raise ValueError("Non-matching fields")
try:
assert np.allclose(params.kc_max, user_data['kc_max'])
except (KeyError, TypeError):
pass # could be NaN e.g. if this is real data
return user_data
except (TypeError, KeyError):
logging.warning(f"No metadata in {ts_path}: recalculating")
except ValueError:
logging.warning(f"Unexpected metadata in {ts_path}: recalculating")
else:
# Otherwise finish off the inference
logger.info(f"MS running with {params.num_threads} threads: will save to {ts_path}")
inferred_ts = tsinfer.match_samples(
samples,
inferred_anc_ts,
num_threads=params.num_threads,
precision=precision,
recombination_rate=params.rec_rate,
mismatch_ratio=params.ms_mis_ratio)
process_time = time.process_time() - start_time
logger.info(f"MS done: mismatch ratio = {params.ms_mis_ratio}")
simplified_inferred_ts = inferred_ts.simplify() # Remove unary nodes
# Calculate mean num children (polytomy-measure) for internal nodes
nc_sum = 0
nc_sum_sq = 0
nc_tot = 0
# Number of mutations after removing muts above samples for multi-mutation sites
revised_num_muts = 0
smp_set = set(simplified_inferred_ts.samples())
root_lengths = collections.defaultdict(float)
for tree in simplified_inferred_ts.trees():
for site in tree.sites():
if len(site.mutations) == 1:
revised_num_muts += 1
else:
revised_num_muts += len([m for m in site.mutations if m.node in smp_set])
for n in tree.nodes():
n_children = tree.num_children(n)
if n_children > 0: # exclude leaves/samples
nc_sum += n_children * tree.span
nc_sum_sq += (n_children ** 2) * tree.span
nc_tot += tree.span
arity_mean = nc_sum/nc_tot
arity_var = nc_sum_sq / nc_tot - (arity_mean ** 2) # can't be bothered to adjust for n
sim_ts_bytes = sim_ts_min_bytes = None
kc_poly = kc_split = None
min_num_muts = None
# Calculate measures against the known (simulated) ts, if it exists
if params.ts_file is not None:
try:
simulated_ts = tskit.load(params.ts_file + ".trees")
logger.info(
f"Calculating minimum req. mutations against {ts_path} using parsimony")
min_num_muts = 0
tree_iter = simulated_ts.trees()
tree = next(tree_iter)
for v in samples.variants():
while v.site.position >= tree.interval.right:
tree = next(tree_iter)
anc_state, mutations = tree.map_mutations(
genotypes=v.genotypes,
alleles=v.alleles,
ancestral_state=v.site.ancestral_state,
)
min_num_muts += len(mutations)
logger.info(f"Calculating KC distances for {ts_path}")
sim_ts_bytes = simulated_ts.nbytes
sim_ts_min_bytes = simulated_ts.simplify(
keep_unary=True, reduce_to_site_topology=True, filter_sites=False).nbytes
kc_poly = simplified_inferred_ts.kc_distance(simulated_ts)
logger.debug("KC poly calculated")
kc_split = 0
for interval, orig_tree, new_tree in simulated_ts.coiterate(
simplified_inferred_ts, sample_lists=True
):
kc_split += interval.span * orig_tree.kc_distance(
new_tree.split_polytomies(
random_seed=int(interval.left),
sample_lists=True))
kc_split /= simulated_ts.sequence_length
logger.debug("KC split calculated")
except FileNotFoundError:
pass
results = Results(
arity_mean=arity_mean,
arity_var=arity_var,
edges=inferred_ts.num_edges,
error=params.error,
kc_max_split=params.kc_max_split,
kc_max=params.kc_max,
kc_poly=kc_poly,
kc_split=kc_split,
muts=inferred_ts.num_mutations,
n=inferred_ts.num_samples,
num_sites=inferred_ts.num_sites,
min_num_muts=min_num_muts,
revised_num_muts=revised_num_muts,
num_trees=inferred_ts.num_trees,
precision=precision,
proc_time=process_time,
ma_mis_ratio=params.ma_mis_ratio,
ms_mis_ratio=params.ms_mis_ratio,
seed=params.seed,
sim_ts_min_bytes=sim_ts_min_bytes,
sim_ts_bytes=sim_ts_bytes,
source=params.source,
ts_bytes=inferred_ts.nbytes,
ts_path=ts_path,
)
# Save the results into the ts metadata - this should allow us to reconstruct the
# results table should anything go awry, or if we need to add more
tables = inferred_ts.dump_tables()
if tables.metadata_schema != tskit.MetadataSchema({"codec":"json"}):
if tables.metadata:
raise RuntimeError("Metadata already exists in the ts, and is not JSON")
tables.metadata_schema = tskit.MetadataSchema({"codec":"json"})
tables.metadata = {}
tables.metadata = {**tables.metadata, "user_data": results._asdict()}
tables.tree_sequence().dump(ts_path)
return results._asdict()
def run_replicate(rep, args):
"""
The main function that runs a parameter set
"""
params = {} # The info to be passed though to each inference run
params['num_threads'] = args.num_threads
params['error'] = args.error
params['source'] = args.source
params['skip_existing'] = args.skip_existing_params
params['seed'] = rep+args.random_seed
precision = [None] if len(args.precision) == 0 else args.precision
if args.source.count(":") >= 3:
logger.debug("Simulating")
details = args.source.split(":")
base_name, ts_name = simulate_stdpopsim(
species=details[0],
contig=details[1],
model=details[2],
num_samples=int(details[3]),
mutation_file=details[4] if len(details)>4 else None,
seed=params['seed'],
skip_existing=params['skip_existing'],
num_procs=args.num_processes,
)
sample_file, anc_file, rho, suffix, ts = setup_sampledata_from_simulation(
ts_name,
random_seed=params['seed'],
err=params['error'],
num_threads=params['num_threads'],
cheat_breakpoints=args.cheat_breakpoints,
use_sites_time=args.use_sites_time,
skip_existing=params['skip_existing'],
)
prefix = ts_name + suffix
base_name += suffix
else:
logger.debug(f"Using provided sample data file {params['source']}")
if not params['source'].endswith(".samples"):
raise ValueError("Sample data file must end with '.samples'")
prefix = params['source'][:-len(".samples")]
sample_file, anc_file, rho, suffix, ts = setup_sample_file(
prefix, args, params['num_threads'])
ts_name = None
base_name = prefix + suffix
params['kc_max'], params['kc_max_split'] = None, None
try:
params['kc_max'] = ts.metadata['user_data']['kc_max']
params['kc_max_split'] = ts.metadata['user_data']['kc_max_split']
except (TypeError, KeyError, AttributeError):
pass
param_iter = [
Params(ts_name, sample_file, anc_file, rho, ma_mr, ms_mr, p, **params)
for ms_mr in args.match_samples_mismatch_ratio
for ma_mr in args.match_ancestors_mismatch_ratio
for p in precision]
treefiles = []
results_filename = prefix + "_results.csv"
with open(results_filename, "wt") as file:
headers = []
if args.num_processes < 2:
for p in param_iter:
result = run(p)
if len(headers) == 0:
headers = list(result.keys())
print(",".join(headers), file=file)
else:
if set(headers) != set(result.keys()):
logging.warning("Some differences in headers")
result_str = [str(result.get(h, "")) for h in headers]
print(",".join(result_str), file=file, flush=True)
treefiles.append(result['ts_path'])
else:
num_procs = min(len(param_iter), args.num_processes)
logger.info(
f"Parallelising {len(param_iter)} parameter combinations "
f"over {num_procs} processes")
with multiprocessing.Pool(num_procs) as pool:
for result in pool.imap_unordered(run, param_iter):
# Save to a results file.
if len(headers) == 0:
headers = list(result.keys())
print(",".join(headers), file=file)
else:
if set(headers) != set(result.keys()):
logging.warning("Some differences in headers")
result_str = [str(result.get(h, "")) for h in headers]
print(",".join(result_str), file=file, flush=True)
treefiles.append(result['ts_path'])
logger.info(f"Results saved to {results_filename}")
return base_name, treefiles
if __name__ == "__main__":
# Set up the range of params for multiprocessing
default_match_samples_mismatch_ratio = np.array(
[1e4, 1e3, 1e2, 10, 5, 2, 1, 0.5, 0.1, 5e-2, 1e-2, 5e-3, 1e-3, 1e-4, 1e-5])
    default_match_ancestors_mismatch_ratio = np.array(
        [1e4, 1e3, 1e2, 10, 5, 2, 1, 0.5, 0.1, 5e-2, 1e-2, 5e-3, 1e-3, 1e-4, 1e-5])
import numpy as np
from scipy import sparse
from mm2d import util
import qpoases
import IPython
# mpc parameters
NUM_WSR = 100 # number of working set recalculations
NUM_ITER = 3 # number of linearizations/iterations
# TODO experimental MPC controller that uses the SQP controller under the hood
# - is there a significant penalty for wrapping things up as Python functions
# rather than directly as arrays?
# - ideally, we'd make a library of objectives, bounds, and constraints that
# could be put together for different behaviours
class TrackingMPC:
def __init__(self, model, dt, Q, R, num_horizon):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.num_horizon = num_horizon
ni = self.model.ni
nv = num_horizon * ni
# setup SQP values
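        # NOTE: `sqp` is assumed here to be a separate SQP helper module providing
        # a Bounds container; it is not imported above, so this class is a sketch
        # rather than runnable code.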
bounds = sqp.Bounds(-model.vel_lim*np.ones(nv), model.vel_lim*np.ones(nv))
def obj_val(x0, xd, var):
q = x0
J = 0
for k in range(num_horizon):
u = var[k*ni:(k+1)*ni] # TODO would be nicer if var was 2D
q = q + dt * u
p = model.forward(q)
J += 0.5 * (p @ Q @ p + u @ R @ u)
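            # NOTE: the target xd is accepted but not yet used in this experimental
            # sketch; the cost currently penalizes the raw output p and the input u.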
return J
class MPC(object):
''' Model predictive controller. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
fbar[k*no:(k+1)*no] = p
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = J
dbar = fbar - pr
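        # Second-order expansion of the tracking cost about the current input
        # sequence u: with p ~= fbar + dt*Jbar*Ebar*delta, the cost
        #   0.5*(p - pr)'Qbar(p - pr) + 0.5*(u + delta)'Rbar(u + delta)
        # has the Hessian H and gradient g (w.r.t. the increment delta) below.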
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
return H, g
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
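        # The QP variable is the increment delta, so the bound |u + delta| <= vel_lim
        # becomes the box constraints -L - u <= delta <= L - u.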
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
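        # e.g. for N = 3: A0 = [[ 1,  0,  0],
        #                       [-1,  1,  0],
        #                       [ 0, -1,  1]],
        # so A0 stacks the input differences u_k - u_{k-1} bounded by lbA/ubA above.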
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = ni*N joint acceleration constraints
num_var = ni * N
num_constraints = ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g = self._lookahead(q0, pr, u, N)
lb, ub = self._calc_vel_limits(u, ni, N)
A, lbA, ubA = self._calc_acc_limits(u, dq0, ni, N)
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
delta = np.zeros(ni * N)
qp.getPrimalSolution(delta)
u = u + delta
# Remaining sequence is hotstarted from the first.
for i in range(NUM_ITER - 1):
H, g = self._lookahead(q0, pr, u, N)
lb, ub = self._calc_vel_limits(u, ni, N)
A, lbA, ubA = self._calc_acc_limits(u, dq0, ni, N)
qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
qp.getPrimalSolution(delta)
u = u + delta
return u
def solve(self, q0, dq0, pr, N):
        ''' Solve the MPC problem at the current state (q0, dq0) given the
        desired output trajectory pr. '''
# initialize optimal inputs
u = np.zeros(self.model.ni * N)
# iterate to final solution
u = self._iterate(q0, dq0, pr, u, N)
# return first optimal input
return u[:self.model.ni]
class ObstacleAvoidingMPC(object):
''' Model predictive controller with obstacle avoidance. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N, pc):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
num_body_pts = 2
Abar = np.zeros((N*num_body_pts, ni*N))
lbA = np.zeros(N*num_body_pts)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
fbar[k*no:(k+1)*no] = p
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = J
# TODO hardcoded radius
# EE and obstacle
obs_radius = 0.6
d_ee_obs = np.linalg.norm(p - pc) - obs_radius
Abar[k*num_body_pts, k*ni:(k+1)*ni] = (p - pc).T.dot(J) / np.linalg.norm(p - pc)
lbA[k*num_body_pts] = -d_ee_obs
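            # Linearized keep-out constraint: with d(q) = ||p - pc|| - r, requiring
            # d >= 0 after the step gives grad(d).dq >= -d(q), i.e. the Jacobian row
            # and lower bound written above.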
# base and obstacle
pb = q[:2]
Jb = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
d_base_obs = np.linalg.norm(pb - pc) - obs_radius - 0.56
Abar[k*num_body_pts+1, k*ni:(k+1)*ni] = (pb - pc).T.dot(Jb) / np.linalg.norm(pb - pc)
lbA[k*num_body_pts+1] = -d_base_obs
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
A = self.dt*Abar.dot(Ebar)
return H, g, A, lbA
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N, pc):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = N obstacle constraints and ni*N joint acceleration
# constraints
num_var = ni * N
num_constraints = 2*N + ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
ubA_obs = np.infty * np.ones_like(lbA_obs)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
delta = np.zeros(ni * N)
qp.getPrimalSolution(delta)
u = u + delta
# Remaining sequence is hotstarted from the first.
for i in range(NUM_ITER - 1):
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
qp.getPrimalSolution(delta)
u = u + delta
return u
def solve(self, q0, dq0, pr, N, pc):
        ''' Solve the MPC problem at the current state (q0, dq0) given the
        desired output trajectory pr. '''
# initialize optimal inputs
u = np.zeros(self.model.ni * N)
# iterate to final solution
u = self._iterate(q0, dq0, pr, u, N, pc)
# return first optimal input
return u[:self.model.ni]
class ObstacleAvoidingMPC2(object):
''' Model predictive controller. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N, pc):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
num_body_pts = 2+1
Abar = np.zeros((N*num_body_pts, ni*N))
lbA = np.zeros(N*num_body_pts)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
pm = self.model.forward_m(q)
Jm = self.model.jacobian_m(q)
fbar[k*no:(k+1)*no] = pm
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = Jm
# TODO hardcoded radius
# EE and obstacle
d_ee_obs = np.linalg.norm(p - pc) - 0.5
Abar[k*num_body_pts, k*ni:(k+1)*ni] = (p - pc).T.dot(J) / np.linalg.norm(p - pc)
lbA[k*num_body_pts] = -d_ee_obs
# base and obstacle
pb = q[:2]
Jb = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
d_base_obs = np.linalg.norm(pb - pc) - 0.5 - 0.56
Abar[k*num_body_pts+1, k*ni:(k+1)*ni] = (pb - pc).T.dot(Jb) / np.linalg.norm(pb - pc)
lbA[k*num_body_pts+1] = -d_base_obs
# pf and ee: these need to stay close together
pf = self.model.forward_f(q)
Jf = self.model.jacobian_f(q)
d_pf_ee = np.linalg.norm(p - pf)
A_pf_ee = -(pf - p).T.dot(Jf - J) / d_pf_ee
lbA_pf_ee = d_pf_ee - 0.75
Abar[k*num_body_pts+2, k*ni:(k+1)*ni] = A_pf_ee
lbA[k*num_body_pts+2] = lbA_pf_ee
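            # Keep-together constraint: require ||p - pf|| <= 0.75 after the step;
            # linearizing d(q) = ||p - pf|| gives -grad(d).dq >= d - 0.75, matching
            # A_pf_ee and lbA_pf_ee above.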
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
A = self.dt*Abar.dot(Ebar)
return H, g, A, lbA
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N, pc):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = N obstacle constraints and ni*N joint acceleration
# constraints
num_var = ni * N
num_constraints = 3*N + ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
ubA_obs = np.infty * np.ones_like(lbA_obs)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
delta = np.zeros(ni * N)
qp.getPrimalSolution(delta)
u = u + delta
# Remaining sequence is hotstarted from the first.
for i in range(NUM_ITER - 1):
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
qp.getPrimalSolution(delta)
u = u + delta
return u
def solve(self, q0, dq0, pr, N, pc):
        ''' Solve the MPC problem at the current state (q0, dq0) given the
        desired output trajectory pr. '''
# initialize optimal inputs
u = np.zeros(self.model.ni * N)
# iterate to final solution
u = self._iterate(q0, dq0, pr, u, N, pc)
# return first optimal input
return u[:self.model.ni]
# class MPC2(object):
# ''' Model predictive controller. '''
# def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
# self.model = model
# self.dt = dt
# self.Q = Q
# self.R = R
# self.vel_lim = vel_lim
# self.acc_lim = acc_lim
#
# def _lookahead(self, q0, pr, u, N):
# ''' Generate lifted matrices proprogating the state N timesteps into the
# future. '''
# ni = self.model.ni # number of joints
# no = self.model.no # number of Cartesian outputs
#
# fbar = np.zeros(no*N) # Lifted forward kinematics
# Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
# Qbar = np.kron(np.eye(N), self.Q)
# Rbar = np.kron(np.eye(N), self.R)
#
# # lower triangular matrix of ni*ni identity matrices
# Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
#
# # Integrate joint positions from the last iteration
# qbar = np.tile(q0, N+1)
# qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
#
# num_body_pts = 1
# Abar = np.zeros((N*num_body_pts, ni*N))
# lbA = np.zeros(N*num_body_pts)
#
# for k in range(N):
# q = qbar[(k+1)*ni:(k+2)*ni]
# p = self.model.forward(q)
# J = self.model.jacobian(q)
#
# pm = self.model.forward_m(q)
# Jm = self.model.jacobian_m(q)
#
# fbar[k*no:(k+1)*no] = pm
# Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = Jm
#
# # pf and ee
# pf = self.model.forward_f(q)
# Jf = self.model.jacobian_f(q)
# d_pf_ee = np.linalg.norm(p - pf)
# A_pf_ee = -(pf - p).T.dot(Jf - J) / d_pf_ee
# lbA_pf_ee = d_pf_ee - 0.75
# Abar[k*num_body_pts, k*ni:(k+1)*ni] = A_pf_ee
# lbA[k*num_body_pts] = lbA_pf_ee
#
# dbar = fbar - pr
#
# H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
# g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
# A = self.dt*Abar.dot(Ebar)
#
# return H, g, A, lbA
#
# def _calc_vel_limits(self, u, ni, N):
# L = np.ones(ni * N) * self.vel_lim
# lb = -L - u
# ub = L - u
# return lb, ub
#
# def _calc_acc_limits(self, u, dq0, ni, N):
# # u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# # u is [u_0, ..., u_{N-1}]
# u_prev = np.zeros(ni * N)
# u_prev[:ni] = dq0
# u_prev[ni:] = u[:-ni]
#
# L = self.dt * np.ones(ni * N) * self.acc_lim
# lbA = -L - u + u_prev
# ubA = L - u + u_prev
#
# d1 = np.ones(N)
# d2 = -np.ones(N - 1)
#
# # A0 is NxN
# A0 = sparse.diags((d1, d2), [0, -1]).toarray()
#
# # kron to make it work for n-dimensional inputs
# A = np.kron(A0, np.eye(ni))
#
# return A, lbA, ubA
#
# def _iterate(self, q0, dq0, pr, u, N):
# ni = self.model.ni
#
# # Create the QP, which we'll solve sequentially.
# # num vars, num constraints (note that constraints only refer to matrix
# # constraints rather than bounds)
# # num constraints = N obstacle constraints and ni*N joint acceleration
# # constraints
# num_var = ni * N
# num_constraints = N + ni * N
# qp = qpoases.PySQProblem(num_var, num_constraints)
# options = qpoases.PyOptions()
# options.printLevel = qpoases.PyPrintLevel.NONE
# qp.setOptions(options)
#
# # Initial opt problem.
# H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N)
# ubA_obs = np.infty * np.ones_like(lbA_obs)
#
# lb, ub = self._calc_vel_limits(u, ni, N)
# A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
#
# A = np.vstack((A_obs, A_acc))
# lbA = np.concatenate((lbA_obs, lbA_acc))
# ubA = np.concatenate((ubA_obs, ubA_acc))
#
# ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
# delta = np.zeros(ni * N)
# qp.getPrimalSolution(delta)
# u = u + delta
#
# # Remaining sequence is hotstarted from the first.
# for i in range(NUM_ITER - 1):
# H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N)
# lb, ub = self._calc_vel_limits(u, ni, N)
# A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
# A = np.vstack((A_obs, A_acc))
# lbA = np.concatenate((lbA_obs, lbA_acc))
# ubA = np.concatenate((ubA_obs, ubA_acc))
#
# qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
# qp.getPrimalSolution(delta)
#
# u = u + delta
#
# return u
#
# def solve(self, q0, dq0, pr, N):
# ''' Solve the MPC problem at current state x0 given desired output
# trajectory Yd. '''
# # initialize optimal inputs
# u = np.zeros(self.model.ni * N)
#
# # iterate to final solution
# u = self._iterate(q0, dq0, pr, u, N)
#
# # return first optimal input
# return u[:self.model.ni]
class MPC2(object):
''' Model predictive controller. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N, pc):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
num_body_pts = 2+1
Abar = np.zeros((N*num_body_pts, ni*N))
lbA = np.zeros(N*num_body_pts)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
p = self.model.forward(q)
J = self.model.jacobian(q)
pm = self.model.forward_m(q)
Jm = self.model.jacobian_m(q)
fbar[k*no:(k+1)*no] = pm
Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = Jm
# TODO hardcoded radius
# EE and obstacle
d_ee_obs = np.linalg.norm(p - pc) - 0.45
Abar[k*num_body_pts, k*ni:(k+1)*ni] = (p - pc).T.dot(J) / np.linalg.norm(p - pc)
lbA[k*num_body_pts] = -d_ee_obs
# base and obstacle
pb = q[:2]
Jb = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
d_base_obs = np.linalg.norm(pb - pc) - 0.45 - 0.56
Abar[k*num_body_pts+1, k*ni:(k+1)*ni] = (pb - pc).T.dot(Jb) / np.linalg.norm(pb - pc)
lbA[k*num_body_pts+1] = -d_base_obs
# pf and ee: these need to stay close together
pf = self.model.forward_f(q)
Jf = self.model.jacobian_f(q)
d_pf_ee = np.linalg.norm(p - pf)
A_pf_ee = -(pf - p).T.dot(Jf - J) / d_pf_ee
lbA_pf_ee = d_pf_ee - 0.75
Abar[k*num_body_pts+2, k*ni:(k+1)*ni] = A_pf_ee
lbA[k*num_body_pts+2] = lbA_pf_ee
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
A = self.dt*Abar.dot(Ebar)
return H, g, A, lbA
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N, pc):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = N obstacle constraints and ni*N joint acceleration
# constraints
num_var = ni * N
num_constraints = 3*N + ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
ubA_obs = np.infty * np.ones_like(lbA_obs)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
delta = np.zeros(ni * N)
qp.getPrimalSolution(delta)
u = u + delta
# Remaining sequence is hotstarted from the first.
for i in range(NUM_ITER - 1):
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
qp.hotstart(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
qp.getPrimalSolution(delta)
u = u + delta
return u
def solve(self, q0, dq0, pr, N, pc):
        ''' Solve the MPC problem at the current state (q0, dq0) given the
        desired output trajectory pr. '''
# initialize optimal inputs
u = np.zeros(self.model.ni * N)
# iterate to final solution
u = self._iterate(q0, dq0, pr, u, N, pc)
# return first optimal input
return u[:self.model.ni]
class EmbraceMPC(object):
''' Model predictive controller. '''
def __init__(self, model, dt, Q, R, vel_lim, acc_lim):
self.model = model
self.dt = dt
self.Q = Q
self.R = R
self.vel_lim = vel_lim
self.acc_lim = acc_lim
def _lookahead(self, q0, pr, u, N, pc):
        ''' Generate lifted matrices propagating the state N timesteps into the
future. '''
ni = self.model.ni # number of joints
no = self.model.no # number of Cartesian outputs
fbar = np.zeros(no*N) # Lifted forward kinematics
Jbar = np.zeros((no*N, ni*N)) # Lifted Jacobian
Qbar = np.kron(np.eye(N), self.Q)
Rbar = np.kron(np.eye(N), self.R)
# lower triangular matrix of ni*ni identity matrices
Ebar = np.kron(np.tril(np.ones((N, N))), np.eye(ni))
# Integrate joint positions from the last iteration
qbar = np.tile(q0, N+1)
qbar[ni:] = qbar[ni:] + self.dt * Ebar.dot(u)
# TODO: need to integrate pc as well: this takes the place of fbar
num_body_pts = 2+1
Abar = np.zeros((N*num_body_pts, ni*N))
lbA = np.zeros(N*num_body_pts)
for k in range(N):
q = qbar[(k+1)*ni:(k+2)*ni]
# calculate points defining front of base
pb = q[:2]
θb = q[2]
R = util.rotation_matrix(θb)
rx = 0.5
ry = 0.25
p1 = R.dot(np.array([rx, ry]))
p2 = R.dot(np.array([rx, -ry]))
# pf is the closest point to the line segment
pf, _ = util.dist_to_line_segment(pc, p1, p2)
# transform into body frame
b_pf = R.T.dot(pf - pb)
JR = util.rotation_jacobian(θb)
Jf = np.hstack((R, JR.dot(pb + b_pf)[:, None], np.zeros((2, 2))))
pe = self.model.forward(q)
Je = self.model.jacobian(q)
re = (pc - pe) / np.linalg.norm(pc - pe)
rf = (pc - pf) / np.linalg.norm(pc - pf)
# propagate center of object forward
pc = pc + self.dt*(Jf + Je).dot(u)
# fbar[k*no:(k+1)*no] = pm
# Jbar[k*no:(k+1)*no, k*ni:(k+1)*ni] = Jm
# # TODO hardcoded radius
# # EE and obstacle
# d_ee_obs = np.linalg.norm(p - pc) - 0.45
# Abar[k*num_body_pts, k*ni:(k+1)*ni] = (p - pc).T.dot(J) / np.linalg.norm(p - pc)
# lbA[k*num_body_pts] = -d_ee_obs
#
# # base and obstacle
# pb = q[:2]
# Jb = np.array([[1, 0, 0, 0, 0],
# [0, 1, 0, 0, 0]])
# d_base_obs = np.linalg.norm(pb - pc) - 0.45 - 0.56
# Abar[k*num_body_pts+1, k*ni:(k+1)*ni] = (pb - pc).T.dot(Jb) / np.linalg.norm(pb - pc)
# lbA[k*num_body_pts+1] = -d_base_obs
#
# # pf and ee: these need to stay close together
# pf = self.model.forward_f(q)
# Jf = self.model.jacobian_f(q)
# d_pf_ee = np.linalg.norm(p - pf)
# A_pf_ee = -(pf - p).T.dot(Jf - J) / d_pf_ee
# lbA_pf_ee = d_pf_ee - 0.75
# Abar[k*num_body_pts+2, k*ni:(k+1)*ni] = A_pf_ee
# lbA[k*num_body_pts+2] = lbA_pf_ee
dbar = fbar - pr
H = Rbar + self.dt**2*Ebar.T.dot(Jbar.T).dot(Qbar).dot(Jbar).dot(Ebar)
g = u.T.dot(Rbar) + self.dt*dbar.T.dot(Qbar).dot(Jbar).dot(Ebar)
A = self.dt*Abar.dot(Ebar)
return H, g, A, lbA
def _calc_vel_limits(self, u, ni, N):
L = np.ones(ni * N) * self.vel_lim
lb = -L - u
ub = L - u
return lb, ub
def _calc_acc_limits(self, u, dq0, ni, N):
# u_prev consists of [dq0, u_0, u_1, ..., u_{N-2}]
# u is [u_0, ..., u_{N-1}]
u_prev = np.zeros(ni * N)
u_prev[:ni] = dq0
u_prev[ni:] = u[:-ni]
L = self.dt * np.ones(ni * N) * self.acc_lim
lbA = -L - u + u_prev
ubA = L - u + u_prev
d1 = np.ones(N)
d2 = -np.ones(N - 1)
# A0 is NxN
A0 = sparse.diags((d1, d2), [0, -1]).toarray()
# kron to make it work for n-dimensional inputs
A = np.kron(A0, np.eye(ni))
return A, lbA, ubA
def _iterate(self, q0, dq0, pr, u, N, pc):
ni = self.model.ni
# Create the QP, which we'll solve sequentially.
# num vars, num constraints (note that constraints only refer to matrix
# constraints rather than bounds)
# num constraints = 3*N obstacle constraints and ni*N joint acceleration
# constraints
num_var = ni * N
num_constraints = 3*N + ni * N
qp = qpoases.PySQProblem(num_var, num_constraints)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qp.setOptions(options)
# Initial opt problem.
H, g, A_obs, lbA_obs = self._lookahead(q0, pr, u, N, pc)
ubA_obs = np.infty * np.ones_like(lbA_obs)
lb, ub = self._calc_vel_limits(u, ni, N)
A_acc, lbA_acc, ubA_acc = self._calc_acc_limits(u, dq0, ni, N)
A = np.vstack((A_obs, A_acc))
lbA = np.concatenate((lbA_obs, lbA_acc))
ubA = np.concatenate((ubA_obs, ubA_acc))
ret = qp.init(H, g, A, lb, ub, lbA, ubA, np.array([NUM_WSR]))
import csv
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import numpy as np
# read the data
csvfile=open("weightedX.csv", 'r')
x = list(csv.reader(csvfile))
csvfile=open("weightedY.csv", 'r')
y = list(csv.reader(csvfile))
m=len(x)
n=1
x3=[]
y2=[]
for i in range(m):
x3.append(float(x[i][0]))
y2.append(float(y[i][0]))
# normalise the data
meanx=sum(x3)/m
v=0 # variance
for i in range(m):
t=x3[i]-meanx
v+=t*t
v/=m
v=math.sqrt(v)
for i in range(m):
x3[i]=(x3[i]-meanx)/v
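# build the design matrix: each row is [1, x_i], i.e. an intercept column
# followed by the normalised feature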
x2=[]
for i in range(m):
x2.append(np.array([1,x3[i]]))
X=np.array(x2)
Y=np.array(y2)
"""
Test of budget table parsing
"""
import os
import pytest
import numpy as np
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["gwf_utl05"]
laytyp = [1]
ss = [1.0e-10]
sy = [0.1]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
ddir = "data"
nlay, nrow, ncol = 1, 1, 1
def build_model(idx, dir):
nper = 2
perlen = [2.0, 2.0]
nstp = [14, 14]
tsmult = [1.0, 1.0]
delr = 10.0
delc = 10.0
top = 10.0
botm = [0.0]
strt = top
hk = 1.0
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-6, 1e-6, 0.97
tdis_rc = []
for id in range(nper):
tdis_rc.append((perlen[id], nstp[id], tsmult[id]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc)
# create gwf model
gwfname = "gwf_" + name
newtonoptions = "NEWTON UNDER_RELAXATION"
gwf = flopy.mf6.ModflowGwf(
sim,
modelname=gwfname,
newtonoptions=newtonoptions,
)
# create iterative model solution and register the gwf model with it
imsgwf = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="DBD",
under_relaxation_theta=0.7,
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename="{}.ims".format(gwfname),
)
sim.register_ims_package(imsgwf, [gwf.name])
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=np.ones((nlay, nrow, ncol), dtype=int),
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf, save_flows=False, icelltype=laytyp[idx], k=hk, k33=hk
)
# storage
sto = flopy.mf6.ModflowGwfsto(
gwf,
save_flows=False,
iconvert=laytyp[idx],
ss=ss[idx],
sy=sy[idx],
steady_state={0: False},
transient={0: True},
)
# wel files
# include very small well rates to ensure that budget table correctly
# prints small numbers with 3-digit exponents
welspdict = {
0: [[(0, 0, 0), -1.0e-200, 0.0]],
1: [[(0, 0, 0), 1.0e-200, 0.0]],
}
wel = flopy.mf6.ModflowGwfwel(
gwf,
print_input=True,
print_flows=True,
stress_period_data=welspdict,
save_flows=False,
auxiliary="CONCENTRATION",
pname="WEL-1",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(gwfname),
head_filerecord="{}.hds".format(gwfname),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL")],
printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
)
# write MODFLOW 6 files
sim.write_simulation()
return sim, None
def eval_flow(sim):
print("evaluating flow...")
name = ex[sim.idxsim]
gwfname = "gwf_" + name
# This will fail if budget numbers cannot be read
fpth = os.path.join(sim.simpath, "{}.lst".format(gwfname))
mflist = flopy.utils.Mf6ListBudget(fpth)
names = mflist.get_record_names()
print(names)
inc = mflist.get_incremental()
print(inc)
assert np.allclose(inc["WEL_IN"], 0.0)
assert np.allclose(inc["WEL_OUT"], 0.0)
import unittest
import numpy as np
from polynomials_on_simplices.calculus.error_measures import relative_error
from polynomials_on_simplices.geometry.mesh.basic_meshes.triangle_meshes import equilateral_triangle_vertices
from polynomials_on_simplices.geometry.primitives.simplex import (
affine_transformation_from_unit, cartesian_to_barycentric_unit, unit)
import polynomials_on_simplices.geometry.primitives.triangle as triangle
from polynomials_on_simplices.linalg.rigid_motion import move, random_rigid_motion
from polynomials_on_simplices.linalg.vector_space_projection import vector_projection
from polynomials_on_simplices.probability_theory.uniform_sampling import nsimplex_sampling
class TestEdges(unittest.TestCase):
def test_2d(self):
vertices = unit(2)
edges = triangle.edges(vertices)
expected_edges = np.array([
[-1.0, 1.0],
[0.0, -1.0],
[1.0, 0.0]
])
self.assertTrue(np.allclose(expected_edges, edges))
def test_3d(self):
vertices = unit(2, 3)
edges = triangle.edges(vertices)
expected_edges = np.array([
[-1.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0]
])
self.assertTrue(np.allclose(expected_edges, edges))
def test_diameter(self):
vertices = unit(2)
d = triangle.diameter(vertices)
self.assertEqual(d, np.sqrt(2))
def test_dual_edges_2d(self):
vertices = np.random.rand(3, 2)
edges = triangle.edges(vertices)
dual_edges = triangle.dual_edges(vertices)
# Test that dual edges are orthogonal to edges
for i in range(3):
self.assertTrue(abs(np.dot(edges[i], dual_edges[i])) < 1e-10)
# Test that dual edges point out of the triangle by comparing with the vector from the edge midpoint to the
# triangle centroid
c = triangle.centroid(vertices)
for i in range(3):
edge_midpoint = 0.5 * (vertices[(i + 1) % 3] + vertices[(i + 2) % 3])
self.assertTrue(np.dot(dual_edges[i], c - edge_midpoint) < 0.0)
def test_dual_edges_3d(self):
vertices = np.random.rand(3, 3)
edges = triangle.edges(vertices)
dual_edges = triangle.dual_edges(vertices)
# Test that dual edges are orthogonal to edges
for i in range(3):
self.assertTrue(abs(np.dot(edges[i], dual_edges[i])) < 1e-10)
# Test that dual edges point out of the triangle by comparing with the vector from the edge midpoint to the
# triangle centroid
c = triangle.centroid(vertices)
for i in range(3):
edge_midpoint = 0.5 * (vertices[(i + 1) % 3] + vertices[(i + 2) % 3])
self.assertTrue(np.dot(dual_edges[i], c - edge_midpoint) < 0.0)
class TestBasis(unittest.TestCase):
def test_2d(self):
p = np.random.rand(3, 2)
b = triangle.basis(p)
self.assertTrue(abs(np.dot(b[0], b[1])) < 1e-10)
self.assertTrue(abs(np.linalg.norm(b[0]) - 1) < 1e-10)
self.assertTrue(abs(np.linalg.norm(b[1]) - 1) < 1e-10)
def test_3d(self):
p = np.random.rand(3, 3)
b = triangle.basis(p)
self.assertTrue(abs(np.dot(b[0], b[1])) < 1e-10)
self.assertTrue(abs(np.linalg.norm(b[0]) - 1) < 1e-10)
self.assertTrue(abs(np.linalg.norm(b[1]) - 1) < 1e-10)
n1 = np.cross(b[0], b[1])
n2 = triangle.normal(p)
self.assertTrue(np.allclose(n1, n2))
"""Simulations of neural population 2.
Simulations of linear- and non-linear, calcium-based, spike-timing-dependent synaptic plasticity
of a homogeneous presynaptic population of 1000 neurons wired onto a single postsynaptic neuron.
Postsynaptic firing follows either the MAT or the AEIF neuron model.
Methods for abstracting the STDP to rate-based plasticity for large parameter spaces.
Simple usage example:
cbsp.set_simulation_time(2.0)
cbsp.set_timstep(0.001)
cbsp.population_2.linear_calcium_mat(u=10, w0=0.5, seed=10)
"""
import os
import multiprocessing
from itertools import product
from tqdm.auto import tqdm
import numpy as np
import numba as nb
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import Normalize
import cbsp
from cbsp import utils
from cbsp.utils import feature_matrix_p2 as feature_matrix
# ---------------- STDP
@nb.jit(nopython=True)
def linear_calcium_mat(u, w0, seed):
"""Integrates the spike-timing dependent synaptic strength.
Args:
u (float): presynaptic firing rate.
w0 (float): initial synapse strength.
seed (int): random state.
Returns:
tuple: (w, t, (u, v, I, c))
with
array: w, change of synapse strengths. Shape (#synapses, #timesteps).
array: t, time.
array: u, presynaptic spike trains. Shape (#synapses, #timesteps).
array: v, postsynaptic spike train. Shape (#timesteps).
array: I, postsynaptic current. Shape (#timesteps).
array: c, calcium traces. Shape (#synapses, #timesteps).
"""
# Calcium
tau_Ca = 0.02227212
Cpre = 0.84410
Cpost = 1.62138
# Plasticity
thetaD = 1
thetaP = 2.009289
drate = 137.7586
prate = 597.08922
sigma = 2.8284
tau = 520.76129
sqrttau = np.sqrt(tau)
# MAT model
tau_m, tau_1, tau_2 = 5.e-3, 10.e-3, 200.e-3
trefr = 2.e-3
c_N = 0.4
R = 50e6
alpha1, alpha2, w_mat = 30.e-3, 2.e-3, 20.e-3
Iconst = c_N * tau_m / R
time = np.linspace(0., cbsp.SIMULATION_TIME, int(cbsp.SIMULATION_TIME / cbsp.TIMESTEP) + 1)
N = 1000
V = 0.
theta1, theta2 = 0., 0.
Theta = 0.
trest = 0.
I = 0.
v_sp = 0.
np.random.seed(seed)
c = np.zeros(N)
w = np.zeros(N)
w[:] = w0 # np.random.normal(w0, w0_std, N)
w_rec_pop = np.zeros((N, len(time)))
u_rec_pop = np.zeros((N, len(time)))
v_rec = np.zeros_like(time)
I_rec = np.zeros_like(time)
c_rec_pop = np.zeros((N, len(time)))
for i, t in enumerate(time):
u_sp = utils.spike_train(N, u, cbsp.TIMESTEP)
n = utils.standard_normal(N, cbsp.TIMESTEP)
Hp = utils.heaviside(c - thetaP)
Hd = utils.heaviside(c - thetaD)
c = c - cbsp.TIMESTEP * c / tau_Ca + Cpre * u_sp + Cpost * v_sp
w = w + cbsp.TIMESTEP / tau * (prate * (1 - w) * Hp - drate * w * Hd + np.sqrt(Hp + Hd) * sigma * sqrttau * n)
I = Iconst * np.dot(w, u_sp)
V = V + cbsp.TIMESTEP * (-V / tau_m + R / tau_m * I)
theta1 = theta1 + cbsp.TIMESTEP * (-theta1 / tau_1) + alpha1 * v_sp
theta2 = theta2 + cbsp.TIMESTEP * (-theta2 / tau_2) + alpha2 * v_sp
Theta = theta1 + theta2 + w_mat
if V > Theta and t > trest:
v_sp = 1.
trest = t + trefr
else:
v_sp = 0.
w_rec_pop[:, i] = w
u_rec_pop[:, i] = u_sp
v_rec[i] = v_sp
I_rec[i] = I
c_rec_pop[:, i] = c
return w_rec_pop, time, (u_rec_pop, v_rec, I_rec, c_rec_pop)
@nb.jit(nopython=True)
def non_linear_calcium_mat(u, w0, seed):
"""
Same as ~cbsp.population_2.linear_calcium_mat(u, w0, seed) for the non linear calcium model.
"""
# Calcium
tau_Ca = 0.01893044
Cpre = 0.86467
Cpost = 2.30815
xi = (2 * (Cpost + Cpre) - Cpost) / Cpre - 1
# Plasticity
thetaD = 1
thetaP = 4.99780
drate = 111.82515
prate = 894.23695
sigma = 2.8284
tau = 707.02258
sqrttau = np.sqrt(tau)
# MAT model
tau_m, tau_1, tau_2 = 5.e-3, 10.e-3, 200.e-3
trefr = 2.e-3
c_N = 0.4
R = 50e6
alpha1, alpha2, w_mat = 30.e-3, 2.e-3, 20.e-3
Iconst = c_N * tau_m / R
time = np.linspace(0., cbsp.SIMULATION_TIME, int(cbsp.SIMULATION_TIME / cbsp.TIMESTEP) + 1)
N = 1000
V = 0.
theta1, theta2 = 0., 0.
Theta = 0.
trest = 0.
I = 0.
v_sp = 0.
np.random.seed(seed)
cpre = np.zeros(N)
cpost = np.zeros(N)
c = np.zeros(N)
w = np.zeros(N)
w[:] = w0 # np.random.normal(w0, w0_std, N)
w_rec_pop = np.zeros((N, len(time)))
u_rec_pop = np.zeros((N, len(time)))
v_rec = np.zeros_like(time)
I_rec = np.zeros_like(time)
c_rec_pop = np.zeros((N, len(time)))
for i, t in enumerate(time):
u_sp = utils.spike_train(N, u, cbsp.TIMESTEP)
n = utils.standard_normal(N, cbsp.TIMESTEP)
Hp = utils.heaviside(c - thetaP)
Hd = utils.heaviside(c - thetaD)
cpre = cpre - cbsp.TIMESTEP * cpre / tau_Ca + Cpre * u_sp
cpost = cpost - cbsp.TIMESTEP * cpost / tau_Ca + Cpost * v_sp + xi * v_sp * cpre
c = cpre + cpost
w = w + cbsp.TIMESTEP / tau * (prate * (1 - w) * Hp - drate * w * Hd + np.sqrt(Hp + Hd) * sigma * sqrttau * n)
I = Iconst * np.dot(w, u_sp)
V = V + cbsp.TIMESTEP * (-V / tau_m + R / tau_m * I)
theta1 = theta1 + cbsp.TIMESTEP * (-theta1 / tau_1) + alpha1 * v_sp
theta2 = theta2 + cbsp.TIMESTEP * (-theta2 / tau_2) + alpha2 * v_sp
Theta = theta1 + theta2 + w_mat
if V > Theta and t > trest:
v_sp = 1.
trest = t + trefr
else:
v_sp = 0.
w_rec_pop[:, i] = w
u_rec_pop[:, i] = u_sp
v_rec[i] = v_sp
I_rec[i] = I
c_rec_pop[:, i] = c
return w_rec_pop, time, (u_rec_pop, v_rec, I_rec, c_rec_pop)
@nb.jit(nopython=True)
def linear_calcium_aeif(u, w0, seed):
"""
Same as ~cbsp.population_2.linear_calcium_mat(u, w0, seed) for the aeif model.
"""
# Calcium
tau_Ca = 0.02227212
Cpre = 0.84410
Cpost = 1.62138
# Plasticity
thetaD = 1
thetaP = 2.009289
drate = 137.7586
prate = 597.08922
sigma = 2.8284
tau = 520.76129
sqrttau = np.sqrt(tau)
# AEIF model (renamed w->z)
C = 2.81e-10 # 2.81e-9 # pF
g_L = 30e-9 # nS
E_L = -70.6e-3 # mV
V_T = -50.4e-3 # mV
D_T = 2e-3 # mV
tau_z = 0.144 # s
a = 4e-9 # nS
b = 0.0805e-9 # nA
R = 1 / g_L
tau_v = R * C
c_N = 0.17 # 0.075
Iconst = c_N * tau_v / R
time = np.linspace(0., cbsp.SIMULATION_TIME, int(cbsp.SIMULATION_TIME / cbsp.TIMESTEP) + 1)
N = 1000
V = E_L
z = 0.
I = 0.
v_sp = 0.
np.random.seed(seed)
c = np.zeros(N)
w = np.zeros(N)
w[:] = w0 # np.random.normal(w0, w0_std, N)
w_rec = np.zeros_like(time)
v_rec = np.zeros_like(time)
w_rec_pop = np.zeros((N, len(time)))
u_rec_pop = np.zeros((N, len(time)))
v_rec = np.zeros_like(time)
I_rec = np.zeros_like(time)
c_rec_pop = np.zeros((N, len(time)))
for i, t in enumerate(time):
# import pdb; pdb.set_trace()
u_sp = utils.spike_train(N, u, cbsp.TIMESTEP)
n = utils.standard_normal(N, cbsp.TIMESTEP)
Hp = utils.heaviside(c - thetaP)
Hd = utils.heaviside(c - thetaD)
c = c - cbsp.TIMESTEP * c / tau_Ca + Cpre * u_sp + Cpost * v_sp
w = w + cbsp.TIMESTEP / tau * (prate * (1 - w) * Hp - drate * w * Hd + np.sqrt(Hp + Hd) * sigma * sqrttau * n)
I = Iconst * np.dot(w, u_sp)
V = V + cbsp.TIMESTEP / tau_v * (- V + E_L + D_T * np.exp((V - V_T) / D_T) - R * z + R * I)
z = z + cbsp.TIMESTEP / tau_z * (a * (V - E_L) - z)
if V > V_T:
v_sp = 1.
V = V - (V_T - E_L)
z = z + b
else:
v_sp = 0.
w_rec_pop[:, i] = w
u_rec_pop[:, i] = u_sp
v_rec[i] = v_sp
I_rec[i] = I
c_rec_pop[:, i] = c
return w_rec_pop, time, (u_rec_pop, v_rec, I_rec, c_rec_pop)
# ---------------- RBP
def stdp2rbp_linear_calcium_mat(*args, **kwargs):
"""Rate-based plasticity from STDP using the linear calcium and MAT model.
Args:
- same as cbsp.population_2.linear_calcium_mat(u, w0, seed)
Returns:
tuple: (w, v)
with
float: w, the population average change of synapse strength at time point 0
float: v, the postsynaptic firing rate within the first 500ms.
"""
w_rec, t, (_, v_rec, _, _) = linear_calcium_mat(*args, **kwargs)
return utils.derivative(w_rec.mean(axis=0), t), v_rec[0:int(0.5 / cbsp.TIMESTEP)].sum() * 2
def stdp2rbp_non_linear_calcium_mat(*args, **kwargs):
"""Rate-based plasticity from STDP using the non linear calcium and MAT model.
Args:
- same as cbsp.population_2.non_linear_calcium_mat(u, w0, seed)
Returns:
tuple: (w, v)
with
float: w, the population average change of synapse strength at time point 0
float: v, the postsynaptic firing rate within the first 500ms.
"""
w_rec, t, (_, v_rec, _, _) = non_linear_calcium_mat(*args, **kwargs)
return utils.derivative(w_rec.mean(axis=0), t), v_rec[0:int(0.5 / cbsp.TIMESTEP)].sum() * 2
def stdp2rbp_linear_calcium_aeif(*args, **kwargs):
"""Rate-based plasticity from STDP using the linear calcium and AEIF model.
Args:
- same as cbsp.population_2.linear_calcium_aeif(u, w0, seed)
Returns:
tuple: (w, v)
with
float: w, the population average change of synapse strength at time point 0
float: v, the postsynaptic firing rate within the first 500ms.
"""
w_rec, t, (_, v_rec, _, _) = linear_calcium_aeif(*args, **kwargs)
return utils.derivative(w_rec.mean(axis=0), t), v_rec[0:int(0.5 / cbsp.TIMESTEP)].sum() * 2
def main_linear_calcium_mat(u=np.arange(0, 101),
w=np.arange(0, 1.05, 0.05),
seed=np.arange(0, 100),
nproc=2):
"""RBP from STDP for the whole parameter space using the linear calcium and MAT model.
Args:
u (array, optional): presynaptic firing rates. Defaults to np.arange(0, 101).
w (array, optional): initial synaptic strengths. Defaults to np.arange(0, 1.05, 0.05).
seed (array, optional): random states. Defaults to np.arange(0, 100).
nproc (int, optional): number of processes to use. Defaults to 2.
Returns:
array: rate-based plasticity and postsynaptic firing for all possible combinations of u, and w.
Has shape (#random_states, u.size * w.size, 2).
"""
pool = multiprocessing.Pool(processes=nproc)
results = np.zeros([seed.size, u.size*w.size, 2])
for i, s in enumerate(tqdm(seed, desc='Seed')):
results[i] = np.array(pool.starmap(stdp2rbp_linear_calcium_mat, product(u, w, np.array([s]))))
return results
def main_non_linear_calcium_mat(u=np.arange(0, 101),
w=np.arange(0, 1.05, 0.05),
seed=np.arange(0, 100),
nproc=2):
"""
Same as ~cbsp.population_2.main_linear_calcium_mat(u, w, seed, nproc) for the non linear calcium model.
"""
pool = multiprocessing.Pool(processes=nproc)
results = np.zeros([seed.size, u.size*w.size, 2])
for i, s in enumerate(tqdm(seed, desc='Seed')):
results[i] = np.array(pool.starmap(stdp2rbp_non_linear_calcium_mat, product(u, w, np.array([s]))))
return results
def main_linear_calcium_aeif(u=np.arange(0, 101),
import numpy as np
import matplotlib.animation
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1
import scipy.interpolate
pi = np.pi
def rebin_1D(a, shape):
sh = shape[0],a.shape[0]//shape[0]
return a.reshape(sh).mean(-1)
def rebin_2D(a, shape):
sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]
return a.reshape(sh).mean(-1).mean(1)
def log_bin(array, x, x_min, x_max, n_bin):
bin_edges = np.logspace(np.log10(x_min), np.log10(x_max), n_bin+1, endpoint=True)
binned_array = np.zeros(n_bin, dtype=array.dtype)
mean_x = np.zeros(n_bin, dtype=array.dtype)
for i in range(n_bin):
M = np.logical_and(bin_edges[i] <= x, x < bin_edges[i+1])
binned_array[i] = np.mean(array[M])
mean_x[i] = np.mean(x[M])
return binned_array, mean_x
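# Minimal usage sketch (illustrative values only): bin a 1D spectrum onto 10
# logarithmically spaced bins between x = 0.1 and x = 10.
#   k = np.linspace(0.01, 20.0, 2000)
#   Pk = k**-2.0
#   Pk_binned, k_mean = log_bin(Pk, k, x_min=0.1, x_max=10.0, n_bin=10)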
def create_Gaussian_field_1d(P, n_grid, box_size, mean=0, output_FT=False, precision=np.float32):
# k_min = 2.0*pi/box_size
# k = np.fft.rfftfreq(n_grid, d=1.0/n_grid)*k_min
# P_grid = P(k)
# if np.any(P_grid <= 0):
# m_ft = np.zeros(k.shape, dtype=np.complex64)
# m_ft[P_grid>0] = np.random.normal(scale=np.sqrt((n_grid/box_size)*n_grid*P_grid[P_grid>0]))*np.exp(2j*pi*np.random.random(k.shape)[P_grid>0])
# else:
# m_ft = np.random.normal(scale=np.sqrt((n_grid/box_size)*n_grid*P_grid))*np.exp(2j*pi*np.random.random(k.shape))
# m_ft[k == 0] = mean
# m = np.fft.irfft(m_ft)
# return m
k_grid = np.fft.rfftfreq(n_grid).astype(precision)
k_min = 2*pi/(box_size/n_grid)
V = box_size/(n_grid)**2
P_grid = np.atleast_1d(P(k_grid*k_min))
m_ft = np.random.normal(scale=np.sqrt(1/V*P_grid))*np.exp(2j*pi*np.random.random(k_grid.shape))
if mean != 0:
m_ft[k_grid == 0] = mean
else:
m_ft[k_grid == 0] = np.random.normal(scale=np.sqrt(1/V*P_grid[k_grid==0]))
m = np.fft.irfft(m_ft)
if output_FT:
return m, m_ft, k_grid*k_min
else:
return m
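# Hedged usage sketch (illustrative spectrum, not taken from the original
# code): draw a 1D Gaussian random field on a 512-point grid.
#   P = lambda k: 1.0 / (1.0 + k**2)   # finite at k = 0
#   m = create_Gaussian_field_1d(P, n_grid=512, box_size=100.0)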
def calculate_pseudo_P_k_1d(m1, m2, box_size, n_k_bin=None, k_min=None, k_max=None, logspaced=False):
if m1.shape != m2.shape:
raise ValueError("Map dimensions don't match: {} vs {}".format(m1.shape, m2.shape))
m1m2 = np.fft.rfft(m1)*np.conj(np.fft.rfft(m2))
k_grid = np.fft.rfftfreq(m1.shape[0])
k_min_box = 2*pi/(box_size/m1.shape[0])
if n_k_bin == None:
bin_edges = k_grid + k_min_box/2
Pk_real = m1m2[1:].real
Pk_imag = m1m2[1:].imag
Pk_err = np.zeros_like(Pk_real)
k_mean = k_grid[1:]
n_mode = np.ones(Pk_real.size, dtype=int)
else:
if logspaced:
bin_edges = np.logspace(np.log10(k_min/k_min_box), np.log10(k_max/k_min_box), n_k_bin+1, endpoint=True)
else:
bin_edges = np.linspace(k_min/k_min_box, k_max/k_min_box, n_k_bin+1, endpoint=True)
n_bin = n_k_bin
Pk_real = np.zeros(n_bin)
Pk_err = np.zeros(n_bin)
Pk_imag = np.zeros(n_bin)
k_mean = np.zeros(n_bin)
n_mode = np.zeros(n_bin)
bin_idx = np.searchsorted(k_grid, bin_edges)
'''
python functions to do various useful data processing/manipulation
'''
import numpy as np
from scipy.special import erf
import fitsio
import glob
import os
import astropy.io.fits as fits
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import desimodel.footprint
import desimodel.focalplane
from random import random
from desitarget.io import read_targets_in_tiles
from desitarget.sv3 import sv3_targetmask
from LSS.Cosmo import distance
def tile2rosette(tile):
if tile < 433:
return (tile-1)//27
else:
if tile >= 433 and tile < 436:
return 13
if tile >= 436 and tile < 439:
return 14
if tile >= 439 and tile < 442:
return 15
if tile >= 442 and tile <=480:
return (tile-442)//3
if tile > 480:
return tile//30
return 999999 #shouldn't be any more?
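# Illustrative check of the mapping above: tiles 1-27 belong to rosette 0,
# e.g. tile2rosette(1) == 0, tile2rosette(28) == 1 and tile2rosette(434) == 13.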
def calc_rosr(rosn,ra,dec):
#given rosetter number and ra,dec, calculate distance from center
roscen = {0:(150.100,2.182),1:(179.6,0),2:(183.1,0),3:(189.9,61.8),4:(194.75,28.2)\
,5:(210.0,5.0),6:(215.5,52.5),7:(217.8,34.4),8:(216.3,-0.6),9:(219.8,-0.6)\
,10:(218.05,2.43),11:(242.75,54.98),12:(241.05,43.45),13:(245.88,43.45),14:(252.5,34.5)\
,15:(269.73,66.02),16:(194.75,24.7),17:(212.8,-0.6),18:(269.73,62.52),19:(236.1,43.45)}
ra = ra*np.pi/180.
dec = dec*np.pi/180.
rac,decc = roscen[rosn]
rac = rac*np.pi/180.
decc = decc*np.pi/180.
cd = np.sin(dec)*np.sin(decc)
# -*- coding: utf-8 -*-
"""
name: demo_ncnn.py
date: 2020-12-16 11:21:07
Env.: Python 3.7.3, WIN 10
"""
import argparse
from abc import ABCMeta, abstractmethod
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import softmax
from tqdm import tqdm
# Copy from nanodet/util/visualization.py
_COLORS = (
np.array(
[
0.000,
0.447,
0.741,
0.850,
0.325,
0.098,
0.929,
0.694,
0.125,
0.494,
0.184,
0.556,
0.466,
0.674,
0.188,
0.301,
0.745,
0.933,
0.635,
0.078,
0.184,
0.300,
0.300,
0.300,
0.600,
0.600,
0.600,
1.000,
0.000,
0.000,
1.000,
0.500,
0.000,
0.749,
0.749,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
1.000,
0.667,
0.000,
1.000,
0.333,
0.333,
0.000,
0.333,
0.667,
0.000,
0.333,
1.000,
0.000,
0.667,
0.333,
0.000,
0.667,
0.667,
0.000,
0.667,
1.000,
0.000,
1.000,
0.333,
0.000,
1.000,
0.667,
0.000,
1.000,
1.000,
0.000,
0.000,
0.333,
0.500,
0.000,
0.667,
0.500,
0.000,
1.000,
0.500,
0.333,
0.000,
0.500,
0.333,
0.333,
0.500,
0.333,
0.667,
0.500,
0.333,
1.000,
0.500,
0.667,
0.000,
0.500,
0.667,
0.333,
0.500,
0.667,
0.667,
0.500,
0.667,
1.000,
0.500,
1.000,
0.000,
0.500,
1.000,
0.333,
0.500,
1.000,
0.667,
0.500,
1.000,
1.000,
0.500,
0.000,
0.333,
1.000,
0.000,
0.667,
1.000,
0.000,
1.000,
1.000,
0.333,
0.000,
1.000,
0.333,
0.333,
1.000,
0.333,
0.667,
1.000,
0.333,
1.000,
1.000,
0.667,
0.000,
1.000,
0.667,
0.333,
1.000,
0.667,
0.667,
1.000,
0.667,
1.000,
1.000,
1.000,
0.000,
1.000,
1.000,
0.333,
1.000,
1.000,
0.667,
1.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.143,
0.143,
0.143,
0.286,
0.286,
0.286,
0.429,
0.429,
0.429,
0.571,
0.571,
0.571,
0.714,
0.714,
0.714,
0.857,
0.857,
0.857,
0.000,
0.447,
0.741,
0.314,
0.717,
0.741,
0.50,
0.5,
0,
]
)
.astype(np.float32)
.reshape(-1, 3)
)
def get_resize_matrix(raw_shape, dst_shape, keep_ratio):
"""
Get resize matrix for resizing raw img to input size
:param raw_shape: (width, height) of raw image
:param dst_shape: (width, height) of input image
:param keep_ratio: whether keep original ratio
:return: 3x3 Matrix
"""
r_w, r_h = raw_shape
d_w, d_h = dst_shape
Rs = np.eye(3)
if keep_ratio:
C = np.eye(3)
C[0, 2] = -r_w / 2
C[1, 2] = -r_h / 2
if r_w / r_h < d_w / d_h:
ratio = d_h / r_h
else:
ratio = d_w / r_w
Rs[0, 0] *= ratio
Rs[1, 1] *= ratio
T = np.eye(3)
T[0, 2] = 0.5 * d_w
T[1, 2] = 0.5 * d_h
return T @ Rs @ C
else:
Rs[0, 0] *= d_w / r_w
Rs[1, 1] *= d_h / r_h
return Rs
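# Illustrative example (mirrors the call in NanoDetABC.preprocess further
# below): build the 3x3 matrix mapping a raw image onto the network input
# and apply it with a perspective warp.
#   M = get_resize_matrix((img.shape[1], img.shape[0]), (320, 320), keep_ratio=True)
#   resized = cv2.warpPerspective(img, M, dsize=(320, 320))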
def warp_boxes(boxes, M, width, height):
"""Apply transform to boxes
Copy from nanodet/data/transform/warp.py
"""
n = len(boxes)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
n * 4, 2
) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
return xy.astype(np.float32)
else:
return boxes
def overlay_bbox_cv(img, all_box, class_names):
"""Draw result boxes
Copy from nanodet/util/visualization.py
"""
# all_box array of [label, x0, y0, x1, y1, score]
all_box.sort(key=lambda v: v[5])
for box in all_box:
label, x0, y0, x1, y1, score = box
# color = self.cmap(i)[:3]
color = (_COLORS[label] * 255).astype(np.uint8).tolist()
text = "{}:{:.1f}%".format(class_names[label], score * 100)
txt_color = (0, 0, 0) if np.mean(_COLORS[label]) > 0.5 else (255, 255, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
txt_size = cv2.getTextSize(text, font, 0.5, 2)[0]
cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
cv2.rectangle(
img,
(x0, y0 - txt_size[1] - 1),
(x0 + txt_size[0] + txt_size[1], y0 - 1),
color,
-1,
)
cv2.putText(img, text, (x0, y0 - 1), font, 0.5, txt_color, thickness=1)
return img
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
top_k: keep top_k results. If k <= 0, keep all the results.
candidate_size: only consider the candidates with the highest scores.
Returns:
picked: a list of indexes of the kept boxes
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
# _, indexes = scores.sort(descending=True)
indexes = np.argsort(scores)
# indexes = indexes[:candidate_size]
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
# current = indexes[0]
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
# indexes = indexes[1:]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(current_box, axis=0),
)
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
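# Usage sketch (made-up boxes): each row is [x0, y0, x1, y1, score]; the two
# heavily overlapping boxes collapse to the higher-scoring one and the
# distant box survives.
#   dets = np.array([[0, 0, 10, 10, 0.9],
#                    [1, 1, 11, 11, 0.8],
#                    [50, 50, 60, 60, 0.7]])
#   kept = hard_nms(dets, iou_threshold=0.3)  # rows with scores 0.9 and 0.7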
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def area_of(left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
class NanoDetABC(metaclass=ABCMeta):
def __init__(
self,
input_shape=[320, 320],
reg_max=7,
strides=[8, 16, 32],
prob_threshold=0.4,
iou_threshold=0.3,
num_candidate=1000,
top_k=-1,
):
self.strides = strides
self.input_shape = input_shape
self.reg_max = reg_max
self.prob_threshold = prob_threshold
self.iou_threshold = iou_threshold
self.num_candidate = num_candidate
self.top_k = top_k
self.img_mean = [103.53, 116.28, 123.675]
self.img_std = [57.375, 57.12, 58.395]
self.input_size = (self.input_shape[1], self.input_shape[0])
self.class_names = [
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic_light",
"fire_hydrant",
"stop_sign",
"parking_meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports_ball",
"kite",
"baseball_bat",
"baseball_glove",
"skateboard",
"surfboard",
"tennis_racket",
"bottle",
"wine_glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot_dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted_plant",
"bed",
"dining_table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell_phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy_bear",
"hair_drier",
"toothbrush",
]
def preprocess(self, img):
# resize image
ResizeM = get_resize_matrix((img.shape[1], img.shape[0]), self.input_size, True)
img_resize = cv2.warpPerspective(img, ResizeM, dsize=self.input_size)
# normalize image
img_input = img_resize.astype(np.float32) / 255
img_mean = np.array(self.img_mean, dtype=np.float32).reshape(1, 1, 3) / 255
img_std = np.array(self.img_std, dtype=np.float32).reshape(1, 1, 3) / 255
img_input = (img_input - img_mean) / img_std
# expand dims
img_input = np.transpose(img_input, [2, 0, 1])
img_input = np.expand_dims(img_input, axis=0)
# from data_utils import get_stock_data
import os
import time
import numpy as np
import pandas as pd
import tushare as ts
from config import Arg
args = Arg()
# The stock data files required by this series of scripts must be named after the stock code, e.g. 000001.csv
# -------------------------- Training-set data processing --------------------- #
def get_train_data(batch_size=args.batch_size, time_step=args.time_step):
ratio = args.ratio
stock_len = args.stock_len
len_index = []
batch_index = []
val_index = []
train_dir = args.train_dir
df = open(train_dir)
data_otrain = pd.read_csv(df)
data_train = data_otrain.iloc[:, 1:].values
print(len(data_train))
label_train = data_otrain.iloc[:, -1].values
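# standardise every feature column to zero mean and unit variance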
normalized_train_data = (data_train - np.mean(data_train, axis=0)) / np.std(data_train, axis=0)
import numpy as np
from envs.focal_point_task_us_env import FocalPointTaskUsEnv
from envs.plane_task_us_env import PlaneTaskUsEnv
from envs.phantom import (
ScatterersPhantom,
Ball,
Teddy
)
from envs.imaging import ImagingSystem, Probe
from envs.generator import (
ConstPhantomGenerator,
ConstProbeGenerator,
RandomProbeGenerator)
import envs.logger
import sys
N_STEPS_PER_EPISODE = 32
N_WORKERS = 4
IMAGING_SYSTEM = ImagingSystem(
c=1540,
fs=100e6,
image_width=40 / 1000,
image_height=90 / 1000,
image_resolution=(40, 90), # [pixels]
median_filter_size=5,
dr_threshold=-200,
dec=1,
no_lines=64
)
DEFAULT_PHANTOM = ScatterersPhantom(
objects=[
Teddy(
belly_pos=np.array([0 / 1000, 0, 50 / 1000]), # X, Y, Z
scale=12 / 1000,
head_offset=.9
)
],
x_border=(-40 / 1000, 40 / 1000),
y_border=(-40 / 1000, 40 / 1000),
z_border=(0, 90 / 1000),
n_scatterers=int(1e4),
n_bck_scatterers=int(1e3),
seed=42,
)
DEFAULT_PHANTOM_GENERATOR = ConstPhantomGenerator(DEFAULT_PHANTOM)
def focal_point_env_fn(trajectory_logger, probe_generator,
phantom_generator=None,
probe_dislocation_prob=None,
dislocation_seed=None,
max_probe_dislocation=None,
step_size=10/1000):
if not phantom_generator:
phantom_generator = DEFAULT_PHANTOM_GENERATOR
imaging = IMAGING_SYSTEM
env = FocalPointTaskUsEnv(
dx_reward_coeff=2,
dz_reward_coeff=1,
imaging=imaging,
phantom_generator=phantom_generator,
probe_generator=probe_generator,
max_steps=N_STEPS_PER_EPISODE,
no_workers=N_WORKERS,
use_cache=True,
trajectory_logger=trajectory_logger,
probe_dislocation_prob=probe_dislocation_prob,
dislocation_seed=dislocation_seed,
max_probe_dislocation=max_probe_dislocation,
step_size=step_size
)
return env
def plane_task_env_fn(trajectory_logger, probe_generator,
phantom_generator=None,
probe_dislocation_prob=None,
dislocation_seed=None,
max_probe_disloc=None,
max_probe_disrot=None,
step_size=5/1000,
rot_deg=20):
if not phantom_generator:
phantom_generator = DEFAULT_PHANTOM_GENERATOR
imaging = IMAGING_SYSTEM
return PlaneTaskUsEnv(
dx_reward_coeff=1,
angle_reward_coeff=1,
imaging=imaging,
phantom_generator=phantom_generator,
probe_generator=probe_generator,
max_steps=N_STEPS_PER_EPISODE,
no_workers=N_WORKERS,
use_cache=True,
trajectory_logger=trajectory_logger,
step_size=step_size,
rot_deg=rot_deg,
probe_dislocation_prob=probe_dislocation_prob,
max_probe_disloc=max_probe_disloc,
max_probe_disrot=max_probe_disrot,
dislocation_seed=dislocation_seed
)
def test_reset():
"""Test created to check a single observation/env state visualization."""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
def test_moving_probe_works():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left
env.step(1) # left
env.step(2) # right (should come from cache)
env.step(2) # right (should come from cache)
env.step(2) # right
env.step(4) # down
env.step(3) # up (cached)
env.step(3) # up
def test_rewards():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left
env.step(2) # right
env.step(4) # down
env.step(3) # up
def test_nop():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(0) # NOP
env.step(2) # right
env.step(0) # NOP
def test_cannot_move_probe_outside_phantom_area():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([-20 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=10 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left - BUMP
env.step(2) # right # -10
env.step(2) # right # 0
env.step(2) # right # 10
env.step(2) # right # 20
env.step(2) # right # 20 - BUMP
env.step(3) # up # 0
env.step(3) # up # 0 - BUMP
env.step(4) # down # 10
env.step(4) # down # 20
env.step(4) # down # 30
env.step(4) # down # 40
env.step(4) # down # 50
env.step(4) # down # 60
env.step(4) # down # 70
env.step(4) # down # 80
env.step(4) # down # 90
env.step(4) # down # 90 - BUMP
def test_caching_works():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left
env.step(2) # right (should come from cache)
def test_random_probe_generator():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
teddy = Teddy(
belly_pos=np.array([0 / 1000, 0, 50 / 1000]), # X, Y, Z
scale=12 / 1000,
head_offset=.9
)
phantom = ScatterersPhantom(
objects=[teddy],
x_border=(-40 / 1000, 40 / 1000),
y_border=(-40 / 1000, 40 / 1000),
z_border=(0, 90 / 1000),
n_scatterers=int(1e4),
n_bck_scatterers=int(1e3),
seed=42,
)
phantom_generator = ConstPhantomGenerator(phantom)
probe_generator = RandomProbeGenerator(
ref_probe=probe,
object_to_align=teddy,
seed=42,
# x_pos default
# focal_pos default
)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator,
phantom_generator=phantom_generator)
env.reset()
env.step(1) # left
env.reset()
env.step(2)
env.reset()
env.step(3)
env.reset()
env.step(3)
env.reset()
env.step(1)
env.reset()
env.step(1)
def test_deep_focus():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=0 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(4) # down - 10
env.step(4) # 20
env.step(4) # 30
env.step(4) # 40
env.step(4) # 50
env.step(4) # 60
env.step(4) # 70
env.step(4) # 80
env.step(4) # 90
# probe random dislocations (focal point env)
def test_random_dislocation_1():
"""
Just check if dislocation are drawn for this env.
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(
trajactory_logger,
probe_generator=probe_generator,
probe_dislocation_prob=.5,
dislocation_seed=42,
max_probe_dislocation=2
)
env.reset()
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
def test_random_dislocation_2():
"""
Check if dislocations are drawn and are properly applied (they
should not impact the last reward, but should be observable in the next state).
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(
trajactory_logger,
probe_generator=probe_generator,
probe_dislocation_prob=.5,
dislocation_seed=42,
max_probe_dislocation=2,
step_size=5/1000
)
env.reset()
env.step(1)
env.step(1)
env.step(2)
env.step(2)
env.step(1)
env.step(1)
env.step(2)
env.step(2)
def test_random_no_dislocation_2():
"""
Check that no dislocations are applied when dislocation_seed is None
(the motion should be unaffected between steps).
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(
trajactory_logger,
probe_generator=probe_generator,
probe_dislocation_prob=.5,
dislocation_seed=None,
max_probe_dislocation=2,
step_size=5/1000
)
env.reset()
env.step(1)
env.step(1)
env.step(2)
env.step(2)
env.step(1)
env.step(1)
env.step(2)
env.step(2)
def test_rotate_1():
"""
rotate at the center of the object by 540 degrees,
first in one direction, then in the other
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = plane_task_env_fn(trajactory_logger, probe_generator=probe_generator,
rot_deg=45)
env.reset()
env.step(3) # 45
env.step(3) # 90
env.step(3) # 135
env.step(3) # 180
env.step(3) # 225
env.step(3) # 270
env.step(3) # 315
env.step(3) # 360
env.step(3) # 45
env.step(4) # should use cache
env.step(4)
env.step(4)
env.step(4)
env.step(4)
env.step(4)
def test_rotate_2():
"""
left, left, rotate, rotate, right, right, right, rotate, rotate
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]),
#!/usr/bin/env python
"""
@package npt
Runs a simulation to compute condensed phase properties (for example, the density
or the enthalpy of vaporization) and compute the derivative with respect
to changing the force field parameters. This script is a part of ForceBalance.
All code in this repository is released under the BSD 3-Clause License (aka BSD 2.0).
Please see github.com/leeping/forcebalance for more details.
"""
from __future__ import division
#==================#
#| Global Imports |#
#==================#
from builtins import zip
from builtins import range
import os
import sys
import glob
import shutil
import argparse
import traceback
import numpy as np
from copy import deepcopy
from collections import namedtuple, OrderedDict
from forcebalance.forcefield import FF
from forcebalance.nifty import col, flat, lp_dump, lp_load, printcool, printcool_dictionary, statisticalInefficiency, which, _exec, isint, wopen, click
from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, f1d7p, in_fd
from forcebalance.molecule import Molecule
from forcebalance.output import getLogger
logger = getLogger(__name__)
#========================================================#
#| Global, user-tunable variables (simulation settings) |#
#========================================================#
parser = argparse.ArgumentParser()
parser.add_argument('engine', help='MD program that we are using; choose "openmm", "tinker", "amber" or "gromacs"')
parser.add_argument('temperature',type=float, help='Temperature (K)')
parser.add_argument('pressure',type=float, help='Pressure (Atm)')
args = parser.parse_args()
faststep = 0.25 # "fast" timestep (for MTS integrator, if used)
temperature = args.temperature # temperature in kelvin
pressure = args.pressure # pressure in atmospheres
engname = args.engine.lower() # Name of the engine
if engname == "openmm":
try:
try:
from openmm.unit import *
from openmm import *
from openmm.app import *
except ImportError:
from simtk.unit import *
from simtk.openmm import *
from simtk.openmm.app import *
except:
traceback.print_exc()
raise Exception("Cannot import OpenMM modules")
from forcebalance.openmmio import *
Engine = OpenMM
elif engname == "gromacs" or engname == "gmx":
from forcebalance.gmxio import *
Engine = GMX
elif engname == "tinker":
from forcebalance.tinkerio import *
Engine = TINKER
elif engname == "amber":
from forcebalance.amberio import *
Engine = AMBER
elif engname == "smirnoff":
from forcebalance.smirnoffio import *
Engine = SMIRNOFF
else:
raise Exception('OpenMM, SMIRNOFF/OpenMM, GROMACS, TINKER, and AMBER are supported at this time.')
#==================#
#| Subroutines |#
#==================#
def mean_stderr(ts):
""" Get mean and standard deviation of a time series. """
return np.mean(ts), np.std(ts)*np.sqrt(statisticalInefficiency(ts, warn=False)/len(ts))
def bzavg(obs,boltz):
""" Get the Boltzmann average of an observable. """
if obs.ndim == 2:
if obs.shape[0] == len(boltz) and obs.shape[1] == len(boltz):
raise Exception('Error - both dimensions have length equal to number of snapshots, now confused!')
elif obs.shape[0] == len(boltz):
return np.sum(obs*boltz.reshape(-1,1),axis=0)/np.sum(boltz)
elif obs.shape[1] == len(boltz):
return np.sum(obs*boltz,axis=1)/np.sum(boltz)
else:
raise Exception('The dimensions are wrong!')
elif obs.ndim == 1:
return np.dot(obs,boltz)/sum(boltz)
else:
raise Exception('The number of dimensions can only be 1 or 2!')
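# Quick worked example (not called anywhere): with obs = np.array([1.0, 3.0])
# and uniform weights boltz = np.array([1.0, 1.0]), bzavg(obs, boltz) gives the
# plain mean 2.0; weighting the second snapshot three times as heavily,
# boltz = np.array([1.0, 3.0]), shifts the Boltzmann average to 2.5.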
def PrintEDA(EDA, N):
# Get energy decomposition statistics.
PrintDict = OrderedDict()
for key, val in EDA.items():
val_avg, val_err = mean_stderr(val)
if val_avg == 0.0: continue
if val_err == 0.0: continue
PrintDict[key] = "% 12.4f +- %10.4f [ % 9.4f +- %7.4f ]" % (val_avg, val_err, val_avg/N, val_err/N)
printcool_dictionary(PrintDict, "Energy Component Analysis, Mean +- Stderr [Per Molecule] (kJ/mol)")
#=============================================#
#| Functions for differentiating energy |#
#| and properties |#
#=============================================#
def energy_derivatives(engine, FF, mvals, h, pgrad, length, AGrad=True, dipole=False):
"""
Compute the first and second derivatives of a set of snapshot
energies with respect to the force field parameters.
This basically calls the finite difference subroutine on the
energy_driver subroutine also in this script.
@param[in] mvals Mathematical parameter values
@param[in] h Finite difference step size
@param[in] phase The phase (liquid, gas) to perform the calculation on
@param[in] AGrad Switch to turn derivatives on or off; if off, return all zeros
@param[in] dipole Switch for dipole derivatives.
@return G First derivative of the energies in a N_param x N_coord array
@return GDx First derivative of the box dipole moment x-component in a N_param x N_coord array
@return GDy First derivative of the box dipole moment y-component in a N_param x N_coord array
@return GDz First derivative of the box dipole moment z-component in a N_param x N_coord array
"""
G = np.zeros((FF.np,length))
GDx = np.zeros((FF.np,length))
GDy = np.zeros((FF.np,length))
GDz = np.zeros((FF.np,length))
if not AGrad:
return G, GDx, GDy, GDz
def energy_driver(mvals_):
FF.make(mvals_)
if dipole:
return engine.energy_dipole()
else:
return engine.energy()
ED0 = energy_driver(mvals)
for i in pgrad:
logger.info("%i %s\r" % (i, (FF.plist[i] + " "*30)))
EDG, _ = f12d3p(fdwrap(energy_driver,mvals,i),h,f0=ED0)
if dipole:
G[i,:] = EDG[:,0]
GDx[i,:] = EDG[:,1]
GDy[i,:] = EDG[:,2]
GDz[i,:] = EDG[:,3]
else:
G[i,:] = EDG[:]
return G, GDx, GDy, GDz
def property_derivatives(engine, FF, mvals, h, pgrad, kT, property_driver, property_kwargs, AGrad=True):
"""
Function for double-checking property derivatives. This function is called to perform
a more explicit numerical derivative of the property, rather than going through the
fluctuation formula. It takes longer and is potentially less precise, which means
it's here mainly as a sanity check.
@param[in] mvals Mathematical parameter values
@param[in] h Finite difference step size
@param[in] phase The phase (liquid, gas) to perform the calculation on
@param[in] property_driver The function that calculates the property
@param[in] property_kwargs A dictionary of arguments that goes into calculating the property
@param[in] AGrad Switch to turn derivatives on or off; if off, return all zeros
@return G First derivative of the property
"""
G = np.zeros(FF.np)
if not AGrad:
return G
def energy_driver(mvals_):
FF.make(mvals_)
return engine.energy_dipole()
ED0 = energy_driver(mvals)
E0 = ED0[:,0]
D0 = ED0[:,1:]
P0 = property_driver(None, **property_kwargs)
if 'h_' in property_kwargs:
H0 = property_kwargs['h_'].copy()
for i in pgrad:
logger.info("%s\n" % (FF.plist[i] + " "*30))
ED1 = fdwrap(energy_driver,mvals,i)(h)
E1 = ED1[:,0]
D1 = ED1[:,1:]
b = np.exp(-(E1-E0)/kT)
b /= np.sum(b)
if 'h_' in property_kwargs:
property_kwargs['h_'] = H0.copy() + (E1-E0)
if 'd_' in property_kwargs:
property_kwargs['d_'] = D1.copy()
S = -1*np.dot(b,np.log(b))
InfoContent = np.exp(S)
if InfoContent / len(E0) < 0.1:
logger.warn("Warning: Effective number of snapshots: % .1f (out of %i)\n" % (InfoContent, len(E0)))
P1 = property_driver(b=b,**property_kwargs)
EDM1 = fdwrap(energy_driver,mvals,i)(-h)
EM1 = EDM1[:,0]
DM1 = EDM1[:,1:]
b = np.exp(-(EM1-E0)/kT)
b /= np.sum(b)
if 'h_' in property_kwargs:
property_kwargs['h_'] = H0.copy() + (EM1-E0)
if 'd_' in property_kwargs:
property_kwargs['d_'] = DM1.copy()
S = -1*np.dot(b,np.log(b))
InfoContent = np.exp(S)
if InfoContent / len(E0) < 0.1:
logger.warn("Warning: Effective number of snapshots: % .1f (out of %i)\n" % (InfoContent, len(E0)))
PM1 = property_driver(b=b,**property_kwargs)
G[i] = (P1-PM1)/(2*h)
if 'h_' in property_kwargs:
property_kwargs['h_'] = H0.copy()
if 'd_' in property_kwargs:
property_kwargs['d_'] = D0.copy()
return G
def main():
"""
Usage: (runcuda.sh) npt.py <openmm|smirnoff|gromacs|tinker|amber> <temperature (K)> <pressure (atm)>
This program is meant to be called automatically by ForceBalance on
a GPU cluster (specifically, subroutines in openmmio.py). It is
not easy to use manually. This is because the force field is read
in from a ForceBalance 'FF' class.
I wrote this program because automatic fitting of the density (or
other equilibrium properties) is computationally intensive, and the
calculations need to be distributed to the queue. The main instance
of ForceBalance (running on my workstation) queues up a bunch of these
jobs (using Work Queue). Then, I submit a bunch of workers to GPU
clusters (e.g. Certainty, Keeneland). The worker scripts connect to
the main instance and receive one of these jobs.
This script can also be executed locally, if you want to (e.g. for
debugging). Just make sure you have the pickled 'forcebalance.p'
file.
"""
printcool("ForceBalance condensed phase simulation using engine: %s" % engname.upper(), color=4, bold=True)
#----
# Load the ForceBalance pickle file which contains:
#----
# - Force field object
# - Optimization parameters
# - Options from the Target object that launched this simulation
# - Switch for whether to evaluate analytic derivatives.
FF,mvals,TgtOptions,AGrad = lp_load('forcebalance.p')
FF.ffdir = '.'
# Write the force field file.
FF.make(mvals)
#----
# Load the options that are set in the ForceBalance input file.
#----
# Finite difference step size
h = TgtOptions['h']
pgrad = TgtOptions['pgrad']
# MD options; time step (fs), production steps, equilibration steps, interval for saving data (ps)
liquid_timestep = TgtOptions['liquid_timestep']
liquid_nsteps = TgtOptions['liquid_md_steps']
liquid_nequil = TgtOptions['liquid_eq_steps']
liquid_intvl = TgtOptions['liquid_interval']
liquid_fnm = TgtOptions['liquid_coords']
liquid_nbeads = TgtOptions ['liquid_nbeads']
gas_timestep = TgtOptions['gas_timestep']
gas_nsteps = TgtOptions['gas_md_steps']
gas_nequil = TgtOptions['gas_eq_steps']
gas_intvl = TgtOptions['gas_interval']
gas_fnm = TgtOptions['gas_coords']
gas_nbeads = TgtOptions ['gas_nbeads']
# Number of threads, multiple timestep integrator, anisotropic box etc.
threads = TgtOptions.get('md_threads', 1)
mts = TgtOptions.get('mts_integrator', 0)
rpmd_beads = TgtOptions.get('rpmd_beads', 0)
force_cuda = TgtOptions.get('force_cuda', 0)
nbarostat = TgtOptions.get('n_mcbarostat', 25)
anisotropic = TgtOptions.get('anisotropic_box', 0)
minimize = TgtOptions.get('minimize_energy', 1)
pimd = TgtOptions.get ('pimd',0)
# Print all options.
printcool_dictionary(TgtOptions, title="Options from ForceBalance")
liquid_snapshots = int((liquid_nsteps * liquid_timestep / 1000) / liquid_intvl)
liquid_iframes = int(1000 * liquid_intvl / liquid_timestep)
gas_snapshots = int((gas_nsteps * gas_timestep / 1000) / gas_intvl)
gas_iframes = int(1000 * gas_intvl / gas_timestep)
logger.info("For the condensed phase system, I will collect %i snapshots spaced apart by %i x %.3f fs time steps\n" \
% (liquid_snapshots, liquid_iframes, liquid_timestep))
if liquid_snapshots < 2:
raise Exception('Please set the number of liquid time steps so that you collect at least two snapshots (minimum %i)' \
% (2000 * int(liquid_intvl/liquid_timestep)))
logger.info("For the gas phase system, I will collect %i snapshots spaced apart by %i x %.3f fs time steps\n" \
% (gas_snapshots, gas_iframes, gas_timestep))
if gas_snapshots < 2:
raise Exception('Please set the number of gas time steps so that you collect at least two snapshots (minimum %i)' \
% (2000 * int(gas_intvl/gas_timestep)))
#----
# Loading coordinates
#----
ML = Molecule(liquid_fnm, toppbc=True)
MG = Molecule(gas_fnm)
# Determine the number of molecules in the condensed phase coordinate file.
NMol = TgtOptions['n_molecules']
logger.info("There are %i molecules in the liquid\n" % (NMol))
#----
# Setting up MD simulations
#----
EngOpts = OrderedDict()
EngOpts["liquid"] = OrderedDict([("coords", liquid_fnm), ("mol", ML), ("pbc", True)])
if "nonbonded_cutoff" in TgtOptions:
EngOpts["liquid"]["nonbonded_cutoff"] = TgtOptions["nonbonded_cutoff"]
if "vdw_cutoff" in TgtOptions:
EngOpts["liquid"]["vdw_cutoff"] = TgtOptions["vdw_cutoff"]
EngOpts["gas"] = OrderedDict([("coords", gas_fnm), ("mol", MG), ("pbc", False)])
GenOpts = OrderedDict([('FF', FF)])
if engname in ["openmm", "smirnoff"]:
# OpenMM-specific options
EngOpts["liquid"]["platname"] = TgtOptions.get("platname", 'CUDA')
# For now, always run gas phase calculations on the reference platform
EngOpts["gas"]["platname"] = 'Reference'
if force_cuda:
try: Platform.getPlatformByName('CUDA')
except: raise RuntimeError('Forcing failure because CUDA platform unavailable')
EngOpts["liquid"]["platname"] = 'CUDA'
if threads > 1: logger.warn("Setting the number of threads will have no effect on OpenMM engine.\n")
if engname == "smirnoff":
if not TgtOptions['liquid_coords'].endswith('.pdb'):
logger.error("With SMIRNOFF engine, please pass a .pdb file to liquid_coords.")
raise RuntimeError
EngOpts["liquid"]["pdb"] = TgtOptions['liquid_coords']
EngOpts["liquid"]["mol2"] = TgtOptions["mol2"]
if not TgtOptions['gas_coords'].endswith('.pdb'):
logger.error("With SMIRNOFF engine, please pass a .pdb file to gas_coords.")
raise RuntimeError
EngOpts["gas"]["pdb"] = TgtOptions['gas_coords']
EngOpts["gas"]["mol2"] = TgtOptions["mol2"]
elif engname == "gromacs":
# Gromacs-specific options
GenOpts["gmxpath"] = TgtOptions["gmxpath"]
GenOpts["gmxsuffix"] = TgtOptions["gmxsuffix"]
EngOpts["liquid"]["gmx_top"] = os.path.splitext(liquid_fnm)[0] + ".top"
EngOpts["liquid"]["gmx_mdp"] = os.path.splitext(liquid_fnm)[0] + ".mdp"
EngOpts["liquid"]["gmx_eq_barostat"] = TgtOptions["gmx_eq_barostat"]
EngOpts["gas"]["gmx_top"] = os.path.splitext(gas_fnm)[0] + ".top"
EngOpts["gas"]["gmx_mdp"] = os.path.splitext(gas_fnm)[0] + ".mdp"
if force_cuda: logger.warn("force_cuda option has no effect on Gromacs engine.")
if rpmd_beads > 0: raise RuntimeError("Gromacs cannot handle RPMD.")
if mts: logger.warn("Gromacs not configured for multiple timestep integrator.")
if anisotropic: logger.warn("Gromacs not configured for anisotropic box scaling.")
elif engname == "tinker":
# Tinker-specific options
GenOpts["tinkerpath"] = TgtOptions["tinkerpath"]
EngOpts["liquid"]["tinker_key"] = os.path.splitext(liquid_fnm)[0] + ".key"
EngOpts["gas"]["tinker_key"] = os.path.splitext(gas_fnm)[0] + ".key"
if force_cuda: logger.warn("force_cuda option has no effect on Tinker engine.")
if rpmd_beads > 0: raise RuntimeError("TINKER cannot handle RPMD.")
if mts: logger.warn("Tinker not configured for multiple timestep integrator.")
elif engname == "amber":
# AMBER-specific options
GenOpts["amberhome"] = TgtOptions["amberhome"]
if os.path.exists(os.path.splitext(liquid_fnm)[0] + ".mdin"):
EngOpts["liquid"]["mdin"] = os.path.splitext(liquid_fnm)[0] + ".mdin"
if os.path.exists(os.path.splitext(gas_fnm)[0] + ".mdin"):
EngOpts["gas"]["mdin"] = os.path.splitext(gas_fnm)[0] + ".mdin"
EngOpts["liquid"]["leapcmd"] = os.path.splitext(liquid_fnm)[0] + ".leap"
EngOpts["gas"]["leapcmd"] = os.path.splitext(gas_fnm)[0] + ".leap"
EngOpts["liquid"]["pdb"] = liquid_fnm
EngOpts["gas"]["pdb"] = gas_fnm
if force_cuda: logger.warn("force_cuda option has no effect on Amber engine.")
if rpmd_beads > 0: raise RuntimeError("AMBER cannot handle RPMD.")
if mts: logger.warn("Amber not configured for multiple timestep integrator.")
EngOpts["liquid"].update(GenOpts)
EngOpts["gas"].update(GenOpts)
for i in EngOpts:
printcool_dictionary(EngOpts[i], "Engine options for %s" % i)
# Set up MD options
MDOpts = OrderedDict()
MDOpts["liquid"] = OrderedDict([("nsteps", liquid_nsteps), ("timestep", liquid_timestep),
("temperature", temperature), ("pressure", pressure),
("nequil", liquid_nequil), ("minimize", minimize),
("nsave", int(1000 * liquid_intvl / liquid_timestep)),
("verbose", True), ('save_traj', TgtOptions['save_traj']),
("threads", threads), ("anisotropic", anisotropic), ("nbarostat", nbarostat),
("mts", mts), ("rpmd_beads", rpmd_beads), ("faststep", faststep),
("pimd", pimd), ("nbeads", liquid_nbeads) ])
MDOpts["gas"] = OrderedDict([("nsteps", gas_nsteps), ("timestep", gas_timestep),
("temperature", temperature), ("nsave", int(1000 * gas_intvl / gas_timestep)),
("nequil", gas_nequil), ("minimize", minimize), ("threads", 1), ("mts", mts),
("rpmd_beads", rpmd_beads), ("faststep", faststep), ("pimd", pimd),
("nbeads", gas_nbeads)])
# Energy components analysis disabled for OpenMM MTS because it uses force groups
if (engname == "openmm" and mts): logger.warn("OpenMM with MTS integrator; energy components analysis will be disabled.\n")
# Create instances of the MD Engine objects.
Liquid = Engine(name="liquid", **EngOpts["liquid"])
Gas = Engine(name="gas", **EngOpts["gas"])
#=================================================================#
# Run the simulation for the full system and analyze the results. #
#=================================================================#
printcool("Condensed phase molecular dynamics", color=4, bold=True)
# This line runs the condensed phase simulation.
click()
prop_return = Liquid.molecular_dynamics(**MDOpts["liquid"])
if hasattr(Liquid, 'freeze_atoms'):
logger.info("Warning: freeze_atoms may result in incorrect system mass and incorrect density calculation\n")
logger.info("Liquid phase MD simulation took %.3f seconds\n" % click())
Rhos = prop_return['Rhos']
Potentials = prop_return['Potentials']
Kinetics = prop_return['Kinetics']
Volumes = prop_return['Volumes']
Dips = prop_return['Dips']
EDA = prop_return['Ecomps']
# Create a bunch of physical constants.
# Energies are in kJ/mol
# Lengths are in nanometers.
L = len(Rhos)
kB = 0.008314472471220214
T = temperature
kT = kB * T
mBeta = -1.0 / kT
Beta = 1.0 / kT
atm_unit = 0.061019351687175
bar_unit = 0.060221417930000
# This is how I calculated the prefactor for the dielectric constant.
# eps0 = 8.854187817620e-12 * coulomb**2 / newton / meter**2
# epsunit = 1.0*(debye**2) / nanometer**3 / BOLTZMANN_CONSTANT_kB / kelvin
# prefactor = epsunit/eps0/3
prefactor = 30.348705333964077
# Gather some physical variables.
Energies = Potentials + Kinetics
Ene_avg, Ene_err = mean_stderr(Energies)
pV = atm_unit * pressure * Volumes
pV_avg, pV_err = mean_stderr(pV)
Rho_avg, Rho_err = mean_stderr(Rhos)
PrintEDA(EDA, NMol)
#==============================================#
# Now run the simulation for just the monomer. #
#==============================================#
# Run the OpenMM simulation, gather information.
printcool("Gas phase molecular dynamics", color=4, bold=True)
click()
mprop_return = Gas.molecular_dynamics(**MDOpts["gas"])
logger.info("Gas phase MD simulation took %.3f seconds\n" % click())
mPotentials = mprop_return['Potentials']
mKinetics = mprop_return['Kinetics']
mEDA = mprop_return['Ecomps']
mEnergies = mPotentials + mKinetics
mEne_avg, mEne_err = mean_stderr(mEnergies)
PrintEDA(mEDA, 1)
#============================================#
# Compute the potential energy derivatives. #
#============================================#
logger.info("Calculating potential energy derivatives with finite difference step size: %f\n" % h)
# Switch for whether to compute the derivatives two different ways for consistency.
FDCheck = False
# Create a double-precision simulation object if desired (seems unnecessary).
DoublePrecisionDerivatives = False
if engname == "openmm" and DoublePrecisionDerivatives and AGrad:
logger.info("Creating Double Precision Simulation for parameter derivatives\n")
Liquid = Engine(name="liquid", openmm_precision="double", **EngOpts["liquid"])
Gas = Engine(name="gas", openmm_precision="double", **EngOpts["gas"])
# Compute the energy and dipole derivatives.
printcool("Condensed phase energy and dipole derivatives\nInitializing array to length %i" % len(Energies), color=4, bold=True)
click()
G, GDx, GDy, GDz = energy_derivatives(Liquid, FF, mvals, h, pgrad, len(Energies), AGrad, dipole=True)
logger.info("Condensed phase energy derivatives took %.3f seconds\n" % click())
click()
printcool("Gas phase energy derivatives", color=4, bold=True)
mG, _, __, ___ = energy_derivatives(Gas, FF, mvals, h, pgrad, len(mEnergies), AGrad, dipole=False)
logger.info("Gas phase energy derivatives took %.3f seconds\n" % click())
#==============================================#
# Condensed phase properties and derivatives. #
#==============================================#
#----
# Density
#----
# Build the first density derivative.
GRho = mBeta * (flat(np.dot(G, col(Rhos))) / L - np.mean(Rhos) * np.mean(G, axis=1))
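    # (Standard fluctuation relation: rho itself does not depend on the force-field parameters,
    #  so d<rho>/dlambda reduces to -beta * cov(dE/dlambda, rho), estimated from the snapshots.)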
# Print out the density and its derivative.
Sep = printcool("Density: % .4f +- % .4f kg/m^3\nAnalytic Derivative:" % (Rho_avg, Rho_err))
FF.print_map(vals=GRho)
logger.info(Sep)
def calc_rho(b = None, **kwargs):
if b is None: b = np.ones(L,dtype=float)
if 'r_' in kwargs:
r_ = kwargs['r_']
return bzavg(r_,b)
# No need to calculate error using bootstrap, but here it is anyway
# Rhoboot = []
# for i in range(numboots):
# boot = np.random.randint(N,size=N)
# Rhoboot.append(calc_rho(None,**{'r_':Rhos[boot]}))
# Rhoboot = np.array(Rhoboot)
# Rho_err = np.std(Rhoboot)
if FDCheck:
Sep = printcool("Numerical Derivative:")
GRho1 = property_derivatives(Liquid, FF, mvals, h, pgrad, kT, calc_rho, {'r_':Rhos})
FF.print_map(vals=GRho1)
Sep = printcool("Difference (Absolute, Fractional):")
absfrac = ["% .4e % .4e" % (i-j, (i-j)/j) for i,j in zip(GRho, GRho1)]
FF.print_map(vals=absfrac)
#----
# Enthalpy of vaporization
#----
H = Energies + pV
V = np.array(Volumes)
# Print out the liquid enthalpy.
logger.info("Liquid enthalpy: % .4f kJ/mol, stdev % .4f ; (% .4f from energy, % .4f from pV)\n" %
(np.mean(H), np.std(H), np.mean(Energies), np.mean(pV)))
numboots = 1000
# The enthalpy of vaporization in kJ/mol.
Hvap_avg = mEne_avg - Ene_avg / NMol + kT - np.mean(pV) / NMol
Hvap_err = np.sqrt(Ene_err**2 / NMol**2 + mEne_err**2 + pV_err**2/NMol**2)
# Build the first Hvap derivative.
GHvap = np.mean(G,axis=1)
GHvap += mBeta * (flat(np.dot(G, col(Energies))) / L - Ene_avg * np.mean(G, axis=1))
GHvap /= NMol
GHvap -= np.mean(mG,axis=1)
GHvap -= mBeta * (flat(np.dot(mG, col(mEnergies))) / L - mEne_avg * np.mean(mG, axis=1))
GHvap *= -1
GHvap -= mBeta * (flat(np.dot(G, col(pV))) / L - np.mean(pV) * np.mean(G, axis=1)) / NMol
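    # At this point GHvap approximates d<E_gas>/dlambda - (d<E_liq>/dlambda + d<pV>/dlambda)/NMol,
    # i.e. the parameter derivative of the Hvap expression computed above (the kT term contributes nothing).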
Sep = printcool("Enthalpy of Vaporization: % .4f +- %.4f kJ/mol\nAnalytic Derivative:" % (Hvap_avg, Hvap_err))
FF.print_map(vals=GHvap)
# Define some things to make the analytic derivatives easier.
Gbar = np.mean(G,axis=1)
def deprod(vec):
return flat(np.dot(G,col(vec)))/L
def covde(vec):
return flat(np.dot(G,col(vec)))/L - Gbar*np.mean(vec)
def avg(vec):
return np.mean(vec)
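    # These helpers write fluctuation derivatives compactly; for example (illustrative only),
    # mBeta * covde(H*V) is the same quantity as -Beta * (deprod(H*V) - avg(H*V) * Gbar).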
#----
# Thermal expansion coefficient
#----
def calc_alpha(b = None, **kwargs):
if b is None: b = np.ones(L,dtype=float)
if 'h_' in kwargs:
h_ = kwargs['h_']
if 'v_' in kwargs:
v_ = kwargs['v_']
return 1/(kT*T) * (bzavg(h_*v_,b)-bzavg(h_,b)*bzavg(v_,b))/bzavg(v_,b)
Alpha = calc_alpha(None, **{'h_':H, 'v_':V})
Alphaboot = []
for i in range(numboots):
boot = np.random.randint(L,size=L)
Alphaboot.append(calc_alpha(None, **{'h_':H[boot], 'v_':V[boot]}))
Alphaboot = np.array(Alphaboot)
    Alpha_err = np.std(Alphaboot)
import base64
import io
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import numpy as np
import tensorflow as tf
from PIL import Image
from constants import CLASSES
import yaml
with open('app.yaml') as yaml_data :
params = yaml.safe_load(yaml_data)
IMAGE_WIDTH = params['IMAGE_WIDTH']
IMAGE_HEIGHT = params['IMAGE_HEIGHT']
PATH_MODEL = params['PATH_MODEL']
# Load DNN model
classifier = tf.keras.models.load_model(PATH_MODEL)
def classify_image(image, model, image_box=None):
"""Classify image by model
Parameters
----------
content: image content
model: tf/keras classifier
Returns
-------
class id returned by model classifier
"""
images_list = []
image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)
# box argument clips image to (x1, y1, x2, y2)
image = np.array(image)
images_list.append(image)
    return model.predict_classes(np.array(images_list))
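# Minimal usage sketch (illustrative; 'example.png' is a placeholder, not a file shipped with this app):
#   img = Image.open('example.png')
#   class_id = classify_image(img, classifier)[0]
#   print(CLASSES[class_id])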
# Copyright (c) <NAME>.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.
# Test for fcmaes coordinated retry applied to https://www.esa.int/gsp/ACT/projects/gtop/
# using https://github.com/esa/pygmo2 / pagmo2 optimization algorithms.
#
# Please install pygmo before executing this test:
# pip install pygmo
import math
from fcmaes.advretry import minimize
from fcmaes.astro import Messenger, Cassini2, Rosetta, Gtoc1, Cassini1, Sagas, Tandem, MessFull
from fcmaes.optimizer import logger, dtime, Optimizer, Sequence, De_cpp, Cma_cpp
from numpy.random import MT19937, Generator
from scipy.optimize import OptimizeResult
import multiprocessing as mp
import numpy as np
import pygmo as pg
import time
class pygmo_udp(object):
"""Wraps a fcmaes fitness function as pygmo udp."""
def __init__(self, fun, bounds):
self.fun = fun
self.bounds = bounds
def fitness(self, x):
return [self.fun(x)]
def get_bounds(self):
return (self.bounds.lb, self.bounds.ub)
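# Usage sketch (illustrative): the wrapper above is what gets handed to pygmo later on, e.g.
#   prob = pg.problem(pygmo_udp(fun, bounds)); pop = pg.population(prob, 31)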
def de_cma_pyg(max_evaluations = 50000, popsize=31, stop_fitness = -math.inf,
de_max_evals = None, cma_max_evals = None):
"""Sequence de1220 -> cmaes pagmo."""
deEvals = np.random.uniform(0.1, 0.3)
if de_max_evals is None:
de_max_evals = int(deEvals*max_evaluations)
if cma_max_evals is None:
cma_max_evals = int((1.0-deEvals)*max_evaluations)
opt1 = De_pyg(popsize=popsize, max_evaluations = de_max_evals, stop_fitness = stop_fitness)
opt2 = Cma_pyg(popsize=popsize, max_evaluations = cma_max_evals,
stop_fitness = stop_fitness)
return Sequence([opt1, opt2])
def pyg_de_cma(max_evaluations = 50000, popsize=31, stop_fitness = -math.inf,
de_max_evals = None, cma_max_evals = None):
"""Sequence de1220 -> cmaes c++."""
deEvals = np.random.uniform(0.1, 0.3)
if de_max_evals is None:
de_max_evals = int(deEvals*max_evaluations)
if cma_max_evals is None:
cma_max_evals = int((1.0-deEvals)*max_evaluations)
opt1 = De_cpp(popsize=popsize, max_evaluations = de_max_evals, stop_fitness = stop_fitness)
opt2 = Cma_pyg(popsize=popsize, max_evaluations = cma_max_evals,
stop_fitness = stop_fitness)
return Sequence([opt1, opt2])
def de_pyg_cma(max_evaluations = 50000, popsize=31, stop_fitness = -math.inf,
de_max_evals = None, cma_max_evals = None):
"""Sequence de c++ -> cmaes pagmo."""
deEvals = np.random.uniform(0.1, 0.3)
if de_max_evals is None:
de_max_evals = int(deEvals*max_evaluations)
if cma_max_evals is None:
cma_max_evals = int((1.0-deEvals)*max_evaluations)
opt1 = De_pyg(popsize=popsize, max_evaluations = de_max_evals, stop_fitness = stop_fitness)
opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
stop_fitness = stop_fitness)
return Sequence([opt1, opt2])
class Cma_pyg(Optimizer):
"""CMA_ES pagmo implementation."""
def __init__(self, max_evaluations=50000, popsize = 31, guess=None, stop_fitness = -math.inf):
Optimizer.__init__(self, max_evaluations, 'cma pagmo')
self.popsize = popsize
self.guess = guess
self.stop_fitness = stop_fitness
def minimize(self, fun, bounds, guess=None, sdevs=0.3, rg=Generator(MT19937()), store=None):
gen = int(self.max_eval_num(store) / self.popsize + 1)
algo = pg.algorithm(pg.cmaes(gen=gen, force_bounds = True,
sigma0 = np.mean(sdevs), seed = int(rg.uniform(0, 2**32 - 1))))
udp = pygmo_udp(fun, bounds)
prob = pg.problem(udp)
pop = pg.population(prob, self.popsize)
        if guess is not None:
scale = np.multiply(0.5 * (bounds.ub - bounds.lb), sdevs)
for i in range(self.popsize):
                xi = np.random.normal(guess, scale)
import numpy
from src.ppopt.utils.constraint_utilities import *
from src.ppopt.utils.general_utils import make_row
import pytest
def test_constraint_norm_1():
A = numpy.random.random((10, 10))
b = numpy.random.random((10, 1))
[As, bs] = scale_constraint(A, b)
results = numpy.linalg.norm(As, axis=1)
assert numpy.allclose(numpy.ones(10), results)
def test_constraint_norm_2():
A = -numpy.random.random((10, 10))
b = numpy.random.random((10, 1))
[As, bs] = scale_constraint(A, b)
# print(A)
#
# print(A[0] / As[0])
# print(b[0] / bs[0])
def test_scale_constraint():
A = 2 * numpy.eye(3)
b = numpy.ones(3)
A, b = scale_constraint(A, b)
assert numpy.allclose(A, numpy.eye(3))
assert numpy.allclose(b, .5 * numpy.ones(3))
def test_remove_zero_rows():
A = numpy.random.random((10, 10))
b = numpy.random.random((10, 1))
A[3] = 0
A[7] = 0
index = [0, 1, 2, 4, 5, 6, 8, 9]
[A_, b_] = remove_zero_rows(A, b)
assert numpy.allclose(A_, A[index])
assert numpy.allclose(b_, b[index])
assert A_.shape == A[index].shape
assert b_.shape == b[index].shape
def test_row_equality_1():
a = numpy.array([1, 2, 4])
b = numpy.array([1, 2, 3])
assert not row_equality(a, b)
def test_row_equality_2():
a = numpy.array([1, 2, 3])
b = numpy.array([1, 2, 3])
assert row_equality(a, b)
def test_remove_duplicate_rows():
A = numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 1, 1]])
b = numpy.array([[1], [2], [1], [1]])
[A, b] = remove_duplicate_rows(A, b)
assert A.shape == (3, 3)
assert b.shape == (3, 1)
def test_is_full_rank_1():
A = numpy.eye(5)
assert is_full_rank(A)
def test_is_full_rank_2():
A = numpy.array([[1, 2, 3], [1, 0, 3]])
assert is_full_rank(A)
def test_is_full_rank_3():
A = numpy.eye(10)
A[-1, -1] = 0
assert not is_full_rank(A)
def test_is_full_rank_4():
A = numpy.eye(4)
assert is_full_rank(A, [1, 2, 3])
def test_is_full_rank_5():
A = numpy.array([[1, 0], [1, 0], [0, 1]])
assert not is_full_rank(A)
assert not is_full_rank(A, [0, 1])
assert is_full_rank(A, [1, 2])
def test_is_full_rank_6():
A = numpy.eye(2)
assert is_full_rank(A, [])
def test_remove_redundant_constraints():
A = numpy.array([[-1, 0], [0, -1], [-1, 1], [-1, 1]])
b = numpy.array([[0], [0], [1], [20]])
# [As, bs] = process_region_constraints(A, b)
As, bs = cheap_remove_redundant_constraints(A, b)
As, bs = remove_strongly_redundant_constraints(As, bs)
# As, bs = facet_ball_elimination(As, bs)
A_ss, b_ss = scale_constraint(A, b)
assert numpy.allclose(As, A_ss[[0, 1, 2]])
assert numpy.allclose(bs, b_ss[[0, 1, 2]])
def test_process_region_constraints():
A = numpy.block([[numpy.eye(3)], [-numpy.eye(3)], [make_row([1, 1, 1])]])
b = numpy.block([[numpy.ones((3, 1))], [numpy.zeros((3, 1))], [numpy.array([[1]])]])
[A, b] = process_region_constraints(A, b)
assert A.shape == (4, 3)
assert b.shape == (4, 1)
@pytest.mark.skip(reason="I am scaling the matrix array, expected output has changed")
def test_facet_ball_elimination():
A = numpy.block([[numpy.eye(2)], [-numpy.eye(2)]])
b = numpy.array([[1], [1], [0], [0]])
A_t = numpy.block([[numpy.eye(2)], [-numpy.eye(2)], [numpy.array([[1, 1]])]])
b_t = numpy.array([[2], [2], [0], [0], [1]])
    A_r = numpy.block([[A], [A_t]])
import numpy as np
import collections as coll
import pytest
import numpy.testing as npt
from matplotlib.patches import Circle
from skimage.measure import label, regionprops
from pulse2percept.implants import (PointSource, ElectrodeArray, ElectrodeGrid,
ProsthesisSystem)
from pulse2percept.stimuli import Stimulus, ImageStimulus, VideoStimulus
from pulse2percept.models import ScoreboardModel
def test_ProsthesisSystem():
# Invalid instantiations:
with pytest.raises(ValueError):
ProsthesisSystem(ElectrodeArray(PointSource(0, 0, 0)),
eye='both')
with pytest.raises(TypeError):
ProsthesisSystem(Stimulus)
# Iterating over the electrode array:
implant = ProsthesisSystem(PointSource(0, 0, 0))
npt.assert_equal(implant.n_electrodes, 1)
npt.assert_equal(implant[0], implant.earray[0])
npt.assert_equal(implant.electrode_names, implant.earray.electrode_names)
for i, e in zip(implant, implant.earray):
npt.assert_equal(i, e)
# Set a stimulus after the constructor:
npt.assert_equal(implant.stim, None)
implant.stim = 3
npt.assert_equal(isinstance(implant.stim, Stimulus), True)
npt.assert_equal(implant.stim.shape, (1, 1))
    npt.assert_equal(implant.stim.time, None)
from datetime import datetime
import numpy as np
import pandas as pd
from course_lib.Base.Evaluation.Evaluator import EvaluatorHoldout
from src.data_management.New_DataSplitter_leave_k_out import New_DataSplitter_leave_k_out
from src.data_management.RecSys2019Reader import RecSys2019Reader
from src.data_management.data_reader import get_ICM_train, get_UCM_train, get_ignore_users, get_ICM_train_new
from src.model.Ensemble.Boosting.boosting_preprocessing import get_label_array, preprocess_dataframe_after_reading
from src.tuning.holdout_validation.run_xgboost_tuning import run_xgb_tuning
from src.utils.general_utility_functions import get_split_seed
if __name__ == '__main__':
# Data loading
root_data_path = "../../data/"
data_reader = RecSys2019Reader(root_data_path)
data_reader = New_DataSplitter_leave_k_out(data_reader, k_out_value=3, use_validation_set=False,
force_new_split=True, seed=get_split_seed())
data_reader.load_data()
URM_train, URM_test = data_reader.get_holdout_split()
# Reading the dataframe
dataframe_path = "../../resources/boosting_dataframe/"
train_df = pd.read_csv(dataframe_path + "train_df_100_advanced_lt_20.csv")
valid_df = pd.read_csv(dataframe_path + "valid_df_30_advanced_lt_20.csv")
train_df = preprocess_dataframe_after_reading(train_df)
y_train = train_df['label'].values + 1
train_df = train_df.drop(columns=["label"], inplace=False)
valid_df = preprocess_dataframe_after_reading(valid_df)
print("Retrieving training labels...", end="")
_, non_zero_count, total = get_label_array(data_frame=train_df, URM_train=URM_train)
print("Done")
# Setting evaluator
mapper = data_reader.get_original_user_id_to_index_mapper()
ignore_users = get_ignore_users(URM_train, mapper, lower_threshold=20, upper_threshold=2 ** 16 - 1,
ignore_non_target_users=True)
evaluator = EvaluatorHoldout(URM_test, cutoff_list=[10], ignore_users=ignore_users)
total_users = np.arange(URM_train.shape[0])
    mask = np.in1d(total_users, ignore_users, invert=True)
#!/usr/bin/python
'''
Code for interacting with the positioner microcontroller
'''
import serial
import time
import struct
import numpy as np
import re
from riglib.experiment import Experiment, Sequence, FSMTable, StateTransitions
import random
import socket
import select
dir_lut = dict(x={0:0, -1:0, 1:1},
y={0:0, -1:0, 1:1},
z={0:1, -1:1, 1:0}, # convention flipped for z-stage
)
class Positioner(object):
def __init__(self, dev='/dev/arduino_positioner'):
self.port = serial.Serial(dev, baudrate=115200)
self.port.flushInput()
def _parse_resp(self, resp):
resp = resp.rstrip()
limits = list(map(int, resp[-6:]))
return limits
def poll_limit_switches(self, N=100):
while 1:
time.sleep(0.1)
self.port.write('\n')
raw_resp = self.port.readline()
print("limit switches", self._parse_resp(raw_resp))
def read_limit_switches(self):
self.port.write('\n')
raw_resp = self.port.readline()
return self._parse_resp(raw_resp)
def wake_motors(self):
self.port.write('w\n')
# self.port.readline()
def sleep_motors(self):
print("sleep motors")
self.port.write('s\n')
# self.port.readline()
def step_motors(self, step_x, step_y, step_z, dir_x, dir_y, dir_z):
cmd_data = 0
cmd_step_data = step_x | (step_y << 1) | (step_z << 2)
cmd_dir_data = dir_x | (dir_y << 1) | (dir_z << 2)
cmd_data = cmd_step_data | (cmd_dir_data << 4)
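        # Worked example (illustrative): step_x=1, step_y=0, step_z=1 with dir_z=1 and dir_x=dir_y=0
        # gives cmd_step_data = 0b101, cmd_dir_data = 0b100, hence cmd_data = 0b01000101 = 0x45.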
cmd = 'm' + struct.pack('B', cmd_data) + '\n'
#print cmd_data, cmd
self.port.write(cmd)
def move(self, n_steps_x, n_steps_y, n_steps_z):
self.wake_motors()
limits = self._parse_resp(self.port.readline())
time.sleep(1)
dir_x = dir_lut['x'][np.sign(n_steps_x)]
dir_y = dir_lut['y'][np.sign(n_steps_y)]
dir_z = dir_lut['z'][np.sign(n_steps_z)]
n_steps_sent_x = 0
n_steps_sent_y = 0
n_steps_sent_z = 0
k = 0
while (abs(n_steps_x) > n_steps_sent_x) or (abs(n_steps_y) > n_steps_sent_y) or (abs(n_steps_z) > n_steps_sent_z):
if k % 10 == 0: print(k)
step_x = int(n_steps_sent_x < abs(n_steps_x))
step_y = int(n_steps_sent_y < abs(n_steps_y))
step_z = int(n_steps_sent_z < abs(n_steps_z))
#print step_x, step_y, step_z, dir_x, dir_y, dir_z
self.step_motors(step_x, step_y, step_z, dir_x, dir_y, dir_z)
limits = self._parse_resp(self.port.readline())
k += 1
n_steps_sent_x += step_x
n_steps_sent_y += step_y
n_steps_sent_z += step_z
self.sleep_motors()
def old_move(self, n_steps_x, n_steps_y, n_steps_z):
self.wake_motors()
try:
time.sleep(1)
dir_x = dir_lut['x'][np.sign(n_steps_x)]
dir_y = dir_lut['y'][np.sign(n_steps_y)]
dir_z = dir_lut['z'][np.sign(n_steps_z)]
n_steps_sent_x = 0
n_steps_sent_y = 0
n_steps_sent_z = 0
k = 0
while (abs(n_steps_x) > n_steps_sent_x) or (abs(n_steps_y) > n_steps_sent_y) or (abs(n_steps_z) > n_steps_sent_z):
if k % 10 == 0: print(k)
step_x = int(n_steps_sent_x < abs(n_steps_x))
step_y = int(n_steps_sent_y < abs(n_steps_y))
step_z = int(n_steps_sent_z < abs(n_steps_z))
#print step_x, step_y, step_z, dir_x, dir_y, dir_z
self.step_motors(step_x, step_y, step_z, dir_x, dir_y, dir_z)
limits = self._parse_resp(self.port.readline())
k += 1
n_steps_sent_x += step_x
n_steps_sent_y += step_y
n_steps_sent_z += step_z
except:
import traceback
traceback.print_exc()
finally:
self.sleep_motors()
def move2(self):
self.wake_motors()
for k in range(200):
self.step_motors(1,0,0,0,0,0)
limits = self._parse_resp(self.port.readline())
self.sleep_motors()
def go_to_min(self, verbose=False):
can_move = self.read_limit_switches()
x_can_decrease = can_move[0]
y_can_decrease = can_move[2]
z_can_decrease = can_move[4]
dir_x = dir_lut['x'][-1]
dir_y = dir_lut['y'][-1]
dir_z = dir_lut['z'][-1]
n_steps_sent_x = 0
n_steps_sent_y = 0
n_steps_sent_z = 0
if x_can_decrease or y_can_decrease or z_can_decrease:
self.wake_motors()
try:
k = 0
while x_can_decrease or y_can_decrease or z_can_decrease:
step_x = int(x_can_decrease)
step_y = int(y_can_decrease)
step_z = int(z_can_decrease)
if verbose:
print(step_x, step_y, step_z)
self.step_motors(step_x, step_y, step_z, dir_x, dir_y, dir_z)
can_move = self._parse_resp(self.port.readline())
x_can_decrease = can_move[0]
y_can_decrease = can_move[2]
z_can_decrease = can_move[4]
n_steps_sent_x += step_x
n_steps_sent_y += step_y
n_steps_sent_z += step_z
k += 1
finally:
self.sleep_motors()
return n_steps_sent_x, n_steps_sent_y, n_steps_sent_z
def go_to_max(self, verbose=False):
can_move = self.read_limit_switches()
x_can_increase = can_move[1]
y_can_increase = can_move[3]
z_can_increase = can_move[5]
dir_x = dir_lut['x'][1]
dir_y = dir_lut['y'][1]
dir_z = dir_lut['z'][1]
n_steps_sent_x = 0
n_steps_sent_y = 0
n_steps_sent_z = 0
if x_can_increase or y_can_increase or z_can_increase:
self.wake_motors()
try:
k = 0
while x_can_increase or y_can_increase or z_can_increase:
step_x = int(x_can_increase)
step_y = int(y_can_increase)
step_z = int(z_can_increase)
if verbose:
print(step_x, step_y, step_z)
self.step_motors(step_x, step_y, step_z, dir_x, dir_y, dir_z)
can_move = self._parse_resp(self.port.readline())
x_can_increase = can_move[1]
y_can_increase = can_move[3]
z_can_increase = can_move[5]
n_steps_sent_x += step_x
n_steps_sent_y += step_y
n_steps_sent_z += step_z
k += 1
finally:
self.sleep_motors()
return n_steps_sent_x, n_steps_sent_y, n_steps_sent_z
def continuous_move(self, n_steps_x, n_steps_y, n_steps_z):
self.start_continuous_move(n_steps_x, n_steps_y, n_steps_z)
return self.end_continuous_move()
def start_continuous_move(self, n_steps_x, n_steps_y, n_steps_z):
'''
Same as 'continuous_move', but without blocking for a response/movement to finish before the function returns
'''
self.wake_motors()
msg = 'c' + struct.pack('>hhh', n_steps_x, n_steps_y, n_steps_z) + '\n'
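        # Wire format (illustrative): 'c', then three big-endian int16 step counts, then '\n';
        # e.g. (100, -50, 0) packs the payload as b'\x00\x64\xff\xce\x00\x00'.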
self.port.write(msg)
self.motor_dir = np.array([np.sign(n_steps_x), np.sign(n_steps_y), np.sign(n_steps_z)])
def end_continuous_move(self, stiff=False):
'''
Cleanup part of 'continuous_move' after 'start_continuous_move' has been called
'''
movement_data = self.port.readline()
try:
m = re.match(".*?: (\d+), (\d+), (\d+)", movement_data)
n_steps_actuated = list(map(int, [m.group(x) for x in [1,2,3]]))
except:
import traceback
traceback.print_exc()
print(movement_data)
if not stiff:
self.sleep_motors()
return n_steps_actuated
def calibrate(self, n_runs):
'''
Repeatedly go from min to max and back so the number of steps can be counted
'''
n_steps_min_to_max = [None]*n_runs
n_steps_max_to_min = [None]*n_runs
self.go_to_min()
time.sleep(1)
for k in range(n_runs):
n_steps_min_to_max[k] = self.go_to_max()
time.sleep(2)
n_steps_max_to_min[k] = self.go_to_min()
print("min to max")
print(n_steps_min_to_max)
print("max to min")
print(n_steps_max_to_min)
time.sleep(2)
return n_steps_min_to_max, n_steps_max_to_min
def data_available(self):
return self.port.inWaiting()
# from features.generator_features import Autostart
class PositionerTaskController(Sequence):
'''
Interface between the positioner and the task interface. The positioner should run asynchronously
so that the task event loop does not have to wait for a serial port response from the microcontroller.
'''
status = FSMTable(
go_to_origin = StateTransitions(microcontroller_done='wait'),
wait = StateTransitions(start_trial='move_target'),
move_target = StateTransitions(microcontroller_done='reach', stoppable=False),
reach = StateTransitions(time_expired='reward', new_target_set_remotely='move_target'),
reward = StateTransitions(time_expired='wait'),
)
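    # Nominal trial flow: go_to_origin -> wait -> move_target -> reach -> reward -> wait,
    # with reach returning to move_target if a new target is set remotely.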
# status = dict(
# go_to_origin = dict(microcontroller_done='wait', stop=None),
# wait = dict(start_trial='move_target', stop=None),
# move_target = dict(microcontroller_done='reach'),
# reach = dict(time_expired='reward', stop=None),
# reward = dict(time_expired='wait'),
# )
state = 'go_to_origin'
sequence_generators = ['random_target_calibration', 'xy_sweep']
reward_time = 1
reach_time = 1
@staticmethod
def random_target_calibration(n_blocks=10):
# # constants selected approximately from one subject's ROM
# targets = [
# (x_min, y_min, z_min),
# (x_max, y_min, z_min),
# (x_min, y_max, z_min),
# (x_min, y_min, z_max),
# (x_max, y_max, z_min),
# (x_max, y_min, z_max),
# (x_min, y_max, z_max),
# (x_max, y_max, z_max),
# ]
# trial_target_ls = []
# for k in range(n_blocks):
# random.shuffle(targets)
# for targ in targets:
# trial_target_ls.append(dict(int_target_pos=targ))
# # set the last target to be the origin since the purpose of this generator is to measure the drift in # of steps
# trial_target_ls.append(dict(int_target_pos=np.zeros(3)))
# return trial_target_ls
# @staticmethod
# def calibration_targets(nblocks=1):
targets = [
(45, 34, 0),
(50, 38, -25),
(40, 35, 0),
(40, 35, -25),
(30, 29, 0),
(30, 29, -25),
(20, 35, 0),
(20, 35, -25),
# (10, 38, 0), # reachable?
# (10, 38, -25), # reachable?
]
trial_target_ls = []
for k in range(n_blocks):
random.shuffle(targets)
for targ in targets:
trial_target_ls.append(dict(int_target_pos=targ))
# set the last target to be the origin since the purpose of this generator is to measure the drift in # of steps
trial_target_ls.append(dict(int_target_pos=np.zeros(3)))
return trial_target_ls
@staticmethod
def xy_sweep(z_min=-25, z_max=0, zpts=6):
xy_target_locs = np.vstack([
[8.20564516129, 37.6302083333],
[9.61693548387, 34.1145833333],
[15.1209677419, 31.1848958333],
[15.5443548387, 34.5703125],
[18.2258064516, 36.5234375],
[23.4475806452, 34.7005208333],
[22.8830645161, 32.3567708333],
[23.0241935484, 29.1666666667],
[28.9516129032, 34.8307291667],
[28.9516129032, 32.2265625],
[29.2338709677, 30.1432291667],
[33.3266129032, 35.4166666667],
[33.8911290323, 33.1380208333],
[30.5040322581, 30.078125],
[20.4838709677, 28.1901041667],
[35.5846774194, 36.5885416667],
[39.2540322581, 33.5286458333],
[41.5120967742, 38.5416666667],
[47.439516129, 37.6953125],
])
trial_target_ls = []
z_range = np.linspace(z_min, z_max, zpts)
for zpt in z_range:
for xy_targ in xy_target_locs:
trial_target_ls.append(dict(int_target_pos=np.hstack([xy_targ, zpt])))
return trial_target_ls
def __init__(self, *args, **kwargs):
'''
Constructor for PositionerTaskController
Parameters
----------
# x_len : float
# measured distance the positioner can travel in the x-dimension
# y_len : float
# measured distance the positioner can travel in the y-dimension
# z_len : float
# measured distance the positioner can travel in the z-dimension
dev : str, optional, default=/dev/ttyACM1
Serial port to use to communicate with Arduino controller
x_cm_per_rev : int, optional, default=12
Number of cm traveled for one full revolution of the stepper motors in the x-dimension
y_cm_per_rev : int, optional, default=12
Number of cm traveled for one full revolution of the stepper motors in the y-dimension
z_cm_per_rev : float, optional, default=7.6
Number of cm traveled for one full revolution of the stepper motors in the z-dimension
x_step_size : float, optional, default=0.25
Microstepping mode in the x-dimension
y_step_size : float, optional, default=0.25
Microstepping mode in the y-dimension
z_step_size : float, optional, default=0.25
Microstepping mode in the z-dimension
Returns
-------
PositionerTaskController instance
'''
# TODO make these input arguments
positioner_dev = '/dev/arduino_positioner'
# cm/rev based on measured data
x_cm_per_rev = 12.4
y_cm_per_rev = 12.4
z_cm_per_rev = 8.0
x_step_size = 1./4
y_step_size = 1./4
z_step_size = 1./4
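        # Rough scale check (illustrative, assuming a standard 200 full-step/rev stepper):
        # 1/4 microstepping gives 800 microsteps per revolution, so the x/y axes cover
        # ~12.4 cm per 800 microsteps, i.e. roughly 64.5 microsteps per cm of travel.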
        self.loc = np.ones(3)
import argparse
from distutils.util import strtobool
import json
import os
import pickle
import numpy as np
import tensorflow as tf
import pdb
from softlearning.environments.utils import get_environment_from_params
from softlearning.policies.utils import get_policy_from_variant
# from softlearning.samplers import rollouts
from softlearning import replay_pools
from softlearning.samplers import (
dummy_sampler,
extra_policy_info_sampler,
remote_sampler,
base_sampler,
simple_sampler)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint_path',
type=str,
help='Path to the checkpoint.')
parser.add_argument('--max-path-length', '-l', type=int, default=1000)
parser.add_argument('--num-rollouts', '-n', type=int, default=10)
parser.add_argument('--render-mode', '-r',
type=str,
default=None,
choices=('human', 'rgb_array', None),
help="Mode to render the rollouts in.")
parser.add_argument('--deterministic', '-d',
type=lambda x: bool(strtobool(x)),
nargs='?',
const=True,
default=True,
help="Evaluate policy deterministically.")
args = parser.parse_args()
return args
def rollout(env,
policy,
path_length,
callback=None,
render_mode=None,
break_on_terminal=True):
observation_space = env.observation_space
action_space = env.action_space
pool = replay_pools.SimpleReplayPool(
observation_space, action_space, max_size=path_length)
sampler = simple_sampler.SimpleSampler(
max_path_length=path_length,
min_pool_size=None,
batch_size=None)
sampler.initialize(env, policy, pool)
images = []
infos = []
state_vectors = []
t = 0
for t in range(path_length):
observation, reward, terminal, info = sampler.sample()
state_vector = sampler.env.unwrapped.state_vector()
infos.append(info)
state_vectors.append(state_vector)
if callback is not None:
callback(observation)
if render_mode is not None:
if render_mode == 'rgb_array':
image = env.render(mode=render_mode)
images.append(image)
else:
env.render()
if terminal:
policy.reset()
if break_on_terminal: break
assert pool._size == t + 1
path = pool.batch_by_indices(
np.arange(pool._size),
observation_keys=getattr(env, 'observation_keys', None))
path['infos'] = infos
    path['state_vectors'] = np.array([sampler._reset_state_vector] + state_vectors[:-1])
from __future__ import print_function, division, absolute_import
import copy as copylib
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.testutils import reseed
import imgaug.random as iarandom
NP_VERSION = np.__version__
IS_NP_117_OR_HIGHER = (
NP_VERSION.startswith("2.")
or NP_VERSION.startswith("1.25")
or NP_VERSION.startswith("1.24")
or NP_VERSION.startswith("1.23")
or NP_VERSION.startswith("1.22")
or NP_VERSION.startswith("1.21")
or NP_VERSION.startswith("1.20")
or NP_VERSION.startswith("1.19")
or NP_VERSION.startswith("1.18")
or NP_VERSION.startswith("1.17")
)
class _Base(unittest.TestCase):
def setUp(self):
reseed()
class TestConstants(_Base):
def test_supports_new_np_rng_style_is_true(self):
assert iarandom.SUPPORTS_NEW_NP_RNG_STYLE is IS_NP_117_OR_HIGHER
def test_global_rng(self):
iarandom.get_global_rng() # creates global RNG upon first call
assert iarandom.GLOBAL_RNG is not None
class TestRNG(_Base):
@mock.patch("imgaug.random.normalize_generator_")
def test___init___calls_normalize_mocked(self, mock_norm):
_ = iarandom.RNG(0)
mock_norm.assert_called_once_with(0)
def test___init___with_rng(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng2.generator is rng1.generator
@mock.patch("imgaug.random.get_generator_state")
def test_state_getter_mocked(self, mock_get):
mock_get.return_value = "mock"
rng = iarandom.RNG(0)
result = rng.state
assert result == "mock"
mock_get.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.RNG.set_state_")
def test_state_setter_mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
rng.state = state
mock_set.assert_called_once_with(state)
@mock.patch("imgaug.random.set_generator_state_")
def test_set_state__mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
result = rng.set_state_(state)
assert result is rng
mock_set.assert_called_once_with(rng.generator, state)
@mock.patch("imgaug.random.set_generator_state_")
def test_use_state_of__mocked(self, mock_set):
rng1 = iarandom.RNG(0)
rng2 = mock.MagicMock()
state = {"foo"}
rng2.state = state
result = rng1.use_state_of_(rng2)
assert result == rng1
mock_set.assert_called_once_with(rng1.generator, state)
@mock.patch("imgaug.random.get_global_rng")
def test_is_global__is_global__rng_mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1.generator)
mock_get.return_value = rng2
assert rng1.is_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_is_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
# different instance with same state/seed should still be viewed as
# different by the method
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.is_global_rng() is False
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is False
@mock.patch("imgaug.random.generate_seed_")
def test_generate_seed__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = -1
seed = rng.generate_seed_()
assert seed == -1
mock_gen.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.generate_seeds_")
def test_generate_seeds__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = [-1, -2]
seeds = rng.generate_seeds_(2)
assert seeds == [-1, -2]
mock_gen.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.reset_generator_cache_")
def test_reset_cache__mocked(self, mock_reset):
rng = iarandom.RNG(0)
result = rng.reset_cache_()
assert result is rng
mock_reset.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rng__mocked(self, mock_derive):
gen = iarandom.convert_seed_to_generator(0)
mock_derive.return_value = [gen]
rng = iarandom.RNG(0)
result = rng.derive_rng_()
assert result.generator is gen
mock_derive.assert_called_once_with(rng.generator, 1)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rngs__mocked(self, mock_derive):
gen1 = iarandom.convert_seed_to_generator(0)
gen2 = iarandom.convert_seed_to_generator(1)
mock_derive.return_value = [gen1, gen2]
rng = iarandom.RNG(0)
result = rng.derive_rngs_(2)
assert result[0].generator is gen1
assert result[1].generator is gen2
mock_derive.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.is_generator_equal_to")
def test_equals_mocked(self, mock_equal):
mock_equal.return_value = "foo"
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
result = rng1.equals(rng2)
assert result == "foo"
mock_equal.assert_called_once_with(rng1.generator, rng2.generator)
def test_equals_identical_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng1.equals(rng2)
def test_equals_with_similar_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
assert rng1.equals(rng2)
def test_equals_with_different_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
assert not rng1.equals(rng2)
def test_equals_with_advanced_generator(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
rng2.advance_()
assert not rng1.equals(rng2)
@mock.patch("imgaug.random.advance_generator_")
def test_advance__mocked(self, mock_advance):
rng = iarandom.RNG(0)
result = rng.advance_()
assert result is rng
mock_advance.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.copy_generator")
def test_copy_mocked(self, mock_copy):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_copy.return_value = rng2.generator
result = rng1.copy()
assert result.generator is rng2.generator
mock_copy.assert_called_once_with(rng1.generator)
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = True
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
assert result is rng
mock_is_global.assert_called_once_with()
assert mock_copy.call_count == 0
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_not_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = False
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
assert result is "foo"
mock_is_global.assert_called_once_with()
mock_copy.assert_called_once_with()
def test_duplicate(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(1)
assert rngs == [rng]
def test_duplicate_two_entries(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(2)
assert rngs == [rng, rng]
@mock.patch("imgaug.random.create_fully_random_generator")
def test_create_fully_random_mocked(self, mock_create):
gen = iarandom.convert_seed_to_generator(0)
mock_create.return_value = gen
rng = iarandom.RNG.create_fully_random()
mock_create.assert_called_once_with()
assert rng.generator is gen
@mock.patch("imgaug.random.derive_generators_")
def test_create_pseudo_random__mocked(self, mock_get):
rng_glob = iarandom.get_global_rng()
rng = iarandom.RNG(0)
mock_get.return_value = [rng.generator]
result = iarandom.RNG.create_pseudo_random_()
assert result.generator is rng.generator
mock_get.assert_called_once_with(rng_glob.generator, 1)
@mock.patch("imgaug.random.polyfill_integers")
def test_integers_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
result = rng.integers(low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
assert result == "foo"
mock_func.assert_called_once_with(
rng.generator, low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
@mock.patch("imgaug.random.polyfill_random")
def test_random_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
out = np.zeros((1,), dtype="float64")
result = rng.random(size=(1,), dtype="float64", out=out)
assert result == "foo"
mock_func.assert_called_once_with(
rng.generator, size=(1,), dtype="float64", out=out)
# TODO below test for generator methods are all just mock-based, add
# non-mocked versions
def test_choice_mocked(self):
self._test_sampling_func("choice", a=[1, 2, 3], size=(1,),
replace=False, p=[0.1, 0.2, 0.7])
def test_bytes_mocked(self):
self._test_sampling_func("bytes", length=[10])
def test_shuffle_mocked(self):
mock_gen = mock.MagicMock()
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng.shuffle([1, 2, 3])
mock_gen.shuffle.assert_called_once_with([1, 2, 3])
def test_permutation_mocked(self):
mock_gen = mock.MagicMock()
rng = iarandom.RNG(0)
rng.generator = mock_gen
mock_gen.permutation.return_value = "foo"
result = rng.permutation([1, 2, 3])
assert result == "foo"
mock_gen.permutation.assert_called_once_with([1, 2, 3])
def test_beta_mocked(self):
self._test_sampling_func("beta", a=1.0, b=2.0, size=(1,))
def test_binomial_mocked(self):
self._test_sampling_func("binomial", n=10, p=0.1, size=(1,))
def test_chisquare_mocked(self):
self._test_sampling_func("chisquare", df=2, size=(1,))
def test_dirichlet_mocked(self):
self._test_sampling_func("dirichlet", alpha=0.1, size=(1,))
def test_exponential_mocked(self):
self._test_sampling_func("exponential", scale=1.1, size=(1,))
def test_f_mocked(self):
self._test_sampling_func("f", dfnum=1, dfden=2, size=(1,))
def test_gamma_mocked(self):
self._test_sampling_func("gamma", shape=1, scale=1.2, size=(1,))
def test_geometric_mocked(self):
self._test_sampling_func("geometric", p=0.5, size=(1,))
def test_gumbel_mocked(self):
self._test_sampling_func("gumbel", loc=0.1, scale=1.1, size=(1,))
def test_hypergeometric_mocked(self):
self._test_sampling_func("hypergeometric", ngood=2, nbad=4, nsample=6,
size=(1,))
def test_laplace_mocked(self):
self._test_sampling_func("laplace", loc=0.5, scale=1.5, size=(1,))
def test_logistic_mocked(self):
self._test_sampling_func("logistic", loc=0.5, scale=1.5, size=(1,))
def test_lognormal_mocked(self):
self._test_sampling_func("lognormal", mean=0.5, sigma=1.5, size=(1,))
def test_logseries_mocked(self):
self._test_sampling_func("logseries", p=0.5, size=(1,))
def test_multinomial_mocked(self):
self._test_sampling_func("multinomial", n=5, pvals=0.5, size=(1,))
def test_multivariate_normal_mocked(self):
self._test_sampling_func("multivariate_normal", mean=0.5, cov=1.0,
size=(1,), check_valid="foo", tol=1e-2)
def test_negative_binomial_mocked(self):
self._test_sampling_func("negative_binomial", n=10, p=0.5, size=(1,))
def test_noncentral_chisquare_mocked(self):
self._test_sampling_func("noncentral_chisquare", df=0.5, nonc=1.0,
size=(1,))
def test_noncentral_f_mocked(self):
self._test_sampling_func("noncentral_f", dfnum=0.5, dfden=1.5,
nonc=2.0, size=(1,))
def test_normal_mocked(self):
self._test_sampling_func("normal", loc=0.5, scale=1.0, size=(1,))
def test_pareto_mocked(self):
self._test_sampling_func("pareto", a=0.5, size=(1,))
def test_poisson_mocked(self):
self._test_sampling_func("poisson", lam=1.5, size=(1,))
def test_power_mocked(self):
self._test_sampling_func("power", a=0.5, size=(1,))
def test_rayleigh_mocked(self):
self._test_sampling_func("rayleigh", scale=1.5, size=(1,))
def test_standard_cauchy_mocked(self):
self._test_sampling_func("standard_cauchy", size=(1,))
def test_standard_exponential_np117_mocked(self):
fname = "standard_exponential"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"size": (1,), "dtype": "float16", "method": "foo",
"out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_exponential_np116_mocked(self):
fname = "standard_exponential"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"size": (1,), "dtype": "float16", "method": "foo",
"out": arr_out}
kwargs_subcall = {"size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_gamma_np117_mocked(self):
fname = "standard_gamma"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"shape": 1.0, "size": (1,), "dtype": "float16", "out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_gamma_np116_mocked(self):
fname = "standard_gamma"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"shape": 1.0, "size": (1,), "dtype": "float16",
"out": arr_out}
kwargs_subcall = {"shape": 1.0, "size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_normal_np117_mocked(self):
fname = "standard_normal"
arr = np.zeros((1,), dtype="float16")
args = []
kwargs = {"size": (1,), "dtype": "float16", "out": arr}
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = True
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
def test_standard_normal_np116_mocked(self):
fname = "standard_normal"
arr_out = np.zeros((1,), dtype="float16")
arr_result = np.ones((1,), dtype="float16")
def _side_effect(x):
return arr_result
args = []
kwargs = {"size": (1,), "dtype": "float16", "out": arr_out}
kwargs_subcall = {"size": (1,)}
mock_gen = mock.MagicMock()
mock_gen.astype.side_effect = _side_effect
getattr(mock_gen, fname).return_value = mock_gen
rng = iarandom.RNG(0)
rng.generator = mock_gen
rng._is_new_rng_style = False
result = getattr(rng, fname)(*args, **kwargs)
getattr(mock_gen, fname).assert_called_once_with(*args,
**kwargs_subcall)
mock_gen.astype.assert_called_once_with("float16")
assert np.allclose(result, arr_result)
assert np.allclose(arr_out, arr_result)
def test_standard_t_mocked(self):
self._test_sampling_func("standard_t", df=1.5, size=(1,))
def test_triangular_mocked(self):
self._test_sampling_func("triangular", left=1.0, mode=1.5, right=2.0,
size=(1,))
def test_uniform_mocked(self):
self._test_sampling_func("uniform", low=0.5, high=1.5, size=(1,))
def test_vonmises_mocked(self):
self._test_sampling_func("vonmises", mu=1.0, kappa=1.5, size=(1,))
def test_wald_mocked(self):
self._test_sampling_func("wald", mean=0.5, scale=1.0, size=(1,))
def test_weibull_mocked(self):
self._test_sampling_func("weibull", a=1.0, size=(1,))
def test_zipf_mocked(self):
self._test_sampling_func("zipf", a=1.0, size=(1,))
@classmethod
def _test_sampling_func(cls, fname, *args, **kwargs):
mock_gen = mock.MagicMock()
getattr(mock_gen, fname).return_value = "foo"
rng = iarandom.RNG(0)
rng.generator = mock_gen
result = getattr(rng, fname)(*args, **kwargs)
assert result == "foo"
getattr(mock_gen, fname).assert_called_once_with(*args, **kwargs)
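    # The TODO above asks for non-mocked variants; a minimal sketch of one (illustrative):
    def test_uniform_not_mocked_sketch(self):
        rng = iarandom.RNG(0)
        samples = rng.uniform(0.0, 1.0, size=(100,))
        assert samples.shape == (100,)
        assert np.all(samples >= 0.0)
        assert np.all(samples < 1.0)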
#
# outdated methods from RandomState
#
def test_rand_mocked(self):
self._test_sampling_func_alias("rand", "random", 1, 2, 3)
def test_randint_mocked(self):
self._test_sampling_func_alias("randint", "integers", 0, 100)
def randn(self):
self._test_sampling_func_alias("randn", "standard_normal", 1, 2, 3)
def random_integers(self):
self._test_sampling_func_alias("random_integers", "integers", 1, 2)
def random_sample(self):
self._test_sampling_func_alias("random_sample", "uniform", (1, 2, 3))
def tomaxint(self):
self._test_sampling_func_alias("tomaxint", "integers", (1, 2, 3))
def test_rand(self):
result = iarandom.RNG(0).rand(10, 20, 3)
assert result.dtype.name == "float32"
assert result.shape == (10, 20, 3)
assert np.all(result >= 0.0)
assert np.all(result <= 1.0)
assert np.any(result > 0.0)
assert np.any(result < 1.0)
def test_randint(self):
result = iarandom.RNG(0).randint(10, 100, size=(10, 20, 3))
assert result.dtype.name == "int32"
assert result.shape == (10, 20, 3)
assert np.all(result >= 10)
assert np.all(result <= 99)
assert np.any(result > 10)
assert np.any(result < 99)
def test_randn(self):
result = iarandom.RNG(0).randn(10, 50, 3)
assert result.dtype.name == "float32"
assert result.shape == (10, 50, 3)
assert np.any(result > 0.5)
assert np.any(result < -0.5)
assert np.average(np.logical_or(result < 2.0, result > -2.0)) > 0.5
def test_random_integers(self):
result = iarandom.RNG(0).random_integers(10, 100, size=(10, 20, 3))
assert result.dtype.name == "int32"
assert result.shape == (10, 20, 3)
assert np.all(result >= 10)
assert np.all(result <= 100)
assert np.any(result > 10)
assert np.any(result < 100)
def test_random_integers__no_high(self):
result = iarandom.RNG(0).random_integers(100, size=(10, 20, 3))
assert result.dtype.name == "int32"
assert result.shape == (10, 20, 3)
assert np.all(result >= 1)
assert np.all(result <= 100)
assert np.any(result > 1)
assert np.any(result < 100)
def test_random_sample(self):
result = iarandom.RNG(0).random_sample((10, 20, 3))
assert result.dtype.name == "float64"
assert result.shape == (10, 20, 3)
assert np.all(result >= 0.0)
assert np.all(result <= 1.0)
assert np.any(result > 0.0)
assert np.any(result < 1.0)
def test_tomaxint(self):
result = iarandom.RNG(0).tomaxint((10, 200, 3))
assert result.dtype.name == "int32"
assert result.shape == (10, 200, 3)
assert np.all(result >= 0)
assert np.any(result > 10000)
@classmethod
def _test_sampling_func_alias(cls, fname_alias, fname_subcall, *args,
**kwargs):
rng = iarandom.RNG(0)
mock_func = mock.Mock()
mock_func.return_value = "foo"
setattr(rng, fname_subcall, mock_func)
result = getattr(rng, fname_alias)(*args, **kwargs)
assert result == "foo"
assert mock_func.call_count == 1
class Test_supports_new_numpy_rng_style(_Base):
def test_call(self):
assert iarandom.supports_new_numpy_rng_style() is IS_NP_117_OR_HIGHER
class Test_get_global_rng(_Base):
def test_call(self):
iarandom.seed(0)
rng = iarandom.get_global_rng()
expected = iarandom.RNG(0)
assert rng is not None
assert rng.equals(expected)
class Test_seed(_Base):
@mock.patch("imgaug.random._seed_np117_")
@mock.patch("imgaug.random._seed_np116_")
def test_mocked_call(self, mock_np116, mock_np117):
iarandom.seed(1)
if IS_NP_117_OR_HIGHER:
mock_np117.assert_called_once_with(1)
assert mock_np116.call_count == 0
else:
mock_np116.assert_called_once_with(1)
assert mock_np117.call_count == 0
def test_integrationtest(self):
iarandom.seed(1)
assert iarandom.GLOBAL_RNG.equals(iarandom.RNG(1))
def test_seed_affects_augmenters_created_after_its_call(self):
image = np.full((50, 50, 3), 128, dtype=np.uint8)
images_aug = []
for _ in np.arange(5):
iarandom.seed(100)
aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
images_aug.append(aug(image=image))
# assert all images identical
for other_image_aug in images_aug[1:]:
assert np.array_equal(images_aug[0], other_image_aug)
# but different seed must lead to different image
iarandom.seed(101)
aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
image_aug = aug(image=image)
assert not np.array_equal(images_aug[0], image_aug)
def test_seed_affects_augmenters_created_before_its_call(self):
image = np.full((50, 50, 3), 128, dtype=np.uint8)
images_aug = []
for _ in np.arange(5):
aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
iarandom.seed(100)
images_aug.append(aug(image=image))
# assert all images identical
for other_image_aug in images_aug[1:]:
assert np.array_equal(images_aug[0], other_image_aug)
# but different seed must lead to different image
aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
iarandom.seed(101)
image_aug = aug(image=image)
assert not np.array_equal(images_aug[0], image_aug)
class Test_normalize_generator(_Base):
@mock.patch("imgaug.random.normalize_generator_")
def test_mocked_call(self, mock_subfunc):
mock_subfunc.return_value = "foo"
inputs = ["bar"]
result = iarandom.normalize_generator(inputs)
assert mock_subfunc.call_count == 1
assert mock_subfunc.call_args[0][0] is not inputs
assert mock_subfunc.call_args[0][0] == inputs
assert result == "foo"
class Test_normalize_generator_(_Base):
@mock.patch("imgaug.random._normalize_generator_np117_")
@mock.patch("imgaug.random._normalize_generator_np116_")
def test_mocked_call(self, mock_np116, mock_np117):
mock_np116.return_value = "np116"
mock_np117.return_value = "np117"
result = iarandom.normalize_generator_(None)
if IS_NP_117_OR_HIGHER:
assert result == "np117"
mock_np117.assert_called_once_with(None)
assert mock_np116.call_count == 0
else:
assert result == "np116"
mock_np116.assert_called_once_with(None)
assert mock_np117.call_count == 0
def test_called_with_none(self):
result = iarandom.normalize_generator_(None)
assert result is iarandom.get_global_rng().generator
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"SeedSequence does not exist in numpy <=1.16")
def test_called_with_seed_sequence(self):
seedseq = np.random.SeedSequence(0)
result = iarandom.normalize_generator_(seedseq)
expected = np.random.Generator(
iarandom.BIT_GENERATOR(np.random.SeedSequence(0)))
assert iarandom.is_generator_equal_to(result, expected)
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"BitGenerator does not exist in numpy <=1.16")
def test_called_with_bit_generator(self):
bgen = iarandom.BIT_GENERATOR(np.random.SeedSequence(0))
result = iarandom.normalize_generator_(bgen)
assert result.bit_generator is bgen
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Generator does not exist in numpy <=1.16")
def test_called_with_generator(self):
gen = np.random.Generator(
iarandom.BIT_GENERATOR(np.random.SeedSequence(0))
)
result = iarandom.normalize_generator_(gen)
assert result is gen
def test_called_with_random_state(self):
rs = np.random.RandomState(0)
result = iarandom.normalize_generator_(rs)
if IS_NP_117_OR_HIGHER:
seed = iarandom.generate_seed_(np.random.RandomState(0))
expected = iarandom.convert_seed_to_generator(seed)
assert iarandom.is_generator_equal_to(result, expected)
else:
assert result is rs
def test_called_int(self):
seed = 0
result = iarandom.normalize_generator_(seed)
expected = iarandom.convert_seed_to_generator(seed)
assert iarandom.is_generator_equal_to(result, expected)
class Test_convert_seed_to_generator(_Base):
@mock.patch("imgaug.random._convert_seed_to_generator_np117")
@mock.patch("imgaug.random._convert_seed_to_generator_np116")
def test_mocked_call(self, mock_np116, mock_np117):
mock_np116.return_value = "np116"
mock_np117.return_value = "np117"
result = iarandom.convert_seed_to_generator(1)
if IS_NP_117_OR_HIGHER:
assert result == "np117"
mock_np117.assert_called_once_with(1)
assert mock_np116.call_count == 0
else:
assert result == "np116"
mock_np116.assert_called_once_with(1)
assert mock_np117.call_count == 0
def test_call(self):
gen = iarandom.convert_seed_to_generator(1)
if IS_NP_117_OR_HIGHER:
expected = np.random.Generator(
iarandom.BIT_GENERATOR(np.random.SeedSequence(1)))
assert iarandom.is_generator_equal_to(gen, expected)
else:
expected = np.random.RandomState(1)
assert iarandom.is_generator_equal_to(gen, expected)
class Test_convert_seed_sequence_to_generator(_Base):
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"SeedSequence does not exist in numpy <=1.16")
def test_call(self):
seedseq = np.random.SeedSequence(1)
gen = iarandom.convert_seed_sequence_to_generator(seedseq)
expected = np.random.Generator(
iarandom.BIT_GENERATOR(np.random.SeedSequence(1)))
assert iarandom.is_generator_equal_to(gen, expected)
class Test_create_pseudo_random_generator_(_Base):
def test_call(self):
global_gen = copylib.deepcopy(iarandom.get_global_rng().generator)
gen = iarandom.create_pseudo_random_generator_()
expected = iarandom.convert_seed_to_generator(
iarandom.generate_seed_(global_gen))
assert iarandom.is_generator_equal_to(gen, expected)
class Test_create_fully_random_generator(_Base):
@mock.patch("imgaug.random._create_fully_random_generator_np117")
@mock.patch("imgaug.random._create_fully_random_generator_np116")
def test_mocked_call(self, mock_np116, mock_np117):
mock_np116.return_value = "np116"
mock_np117.return_value = "np117"
result = iarandom.create_fully_random_generator()
if IS_NP_117_OR_HIGHER:
assert result == "np117"
mock_np117.assert_called_once_with()
assert mock_np116.call_count == 0
else:
assert result == "np116"
mock_np116.assert_called_once_with()
assert mock_np117.call_count == 0
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_np117_mocked(self):
dummy_bitgen = np.random.SFC64(1)
with mock.patch("numpy.random.SFC64") as mock_bitgen:
mock_bitgen.return_value = dummy_bitgen
result = iarandom._create_fully_random_generator_np117()
assert mock_bitgen.call_count == 1
assert iarandom.is_generator_equal_to(
result, np.random.Generator(dummy_bitgen))
def test_np116_mocked(self):
dummy_rs = np.random.RandomState(1)
with mock.patch("numpy.random.RandomState") as mock_rs:
mock_rs.return_value = dummy_rs
result = iarandom._create_fully_random_generator_np116()
assert mock_rs.call_count == 1
assert iarandom.is_generator_equal_to(result, np.random.RandomState(1))
class Test_generate_seed_(_Base):
@mock.patch("imgaug.random.generate_seeds_")
def test_mocked_call(self, mock_seeds):
gen = iarandom.convert_seed_to_generator(0)
_ = iarandom.generate_seed_(gen)
mock_seeds.assert_called_once_with(gen, 1)
class Test_generate_seeds_(_Base):
@mock.patch("imgaug.random.polyfill_integers")
def test_mocked_call(self, mock_integers):
gen = iarandom.convert_seed_to_generator(0)
_ = iarandom.generate_seeds_(gen, 10)
mock_integers.assert_called_once_with(
gen, iarandom.SEED_MIN_VALUE, iarandom.SEED_MAX_VALUE, size=(10,))
def test_call(self):
gen = iarandom.convert_seed_to_generator(0)
seeds = iarandom.generate_seeds_(gen, 2)
assert len(seeds) == 2
assert ia.is_np_array(seeds)
assert seeds.dtype.name == "int32"
class Test_copy_generator(_Base):
@mock.patch("imgaug.random._copy_generator_np116")
def test_mocked_call_with_random_state(self, mock_np116):
mock_np116.return_value = "np116"
gen = np.random.RandomState(1)
gen_copy = iarandom.copy_generator(gen)
assert gen_copy == "np116"
mock_np116.assert_called_once_with(gen)
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
@mock.patch("imgaug.random._copy_generator_np117")
def test_mocked_call_with_generator(self, mock_np117):
mock_np117.return_value = "np117"
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen_copy = iarandom.copy_generator(gen)
assert gen_copy == "np117"
mock_np117.assert_called_once_with(gen)
def test_call_with_random_state(self):
gen = np.random.RandomState(1)
gen_copy = iarandom._copy_generator_np116(gen)
assert gen is not gen_copy
assert iarandom.is_generator_equal_to(gen, gen_copy)
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_call_with_generator(self):
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen_copy = iarandom._copy_generator_np117(gen)
assert gen is not gen_copy
assert iarandom.is_generator_equal_to(gen, gen_copy)
class Test_copy_generator_unless_global_generator(_Base):
@mock.patch("imgaug.random.get_global_rng")
@mock.patch("imgaug.random.copy_generator")
def test_mocked_gen_is_global(self, mock_copy, mock_get_global_rng):
gen = iarandom.convert_seed_to_generator(1)
mock_copy.return_value = "foo"
mock_get_global_rng.return_value = iarandom.RNG(gen)
result = iarandom.copy_generator_unless_global_generator(gen)
assert mock_get_global_rng.call_count == 1
assert mock_copy.call_count == 0
assert result is gen
@mock.patch("imgaug.random.get_global_rng")
@mock.patch("imgaug.random.copy_generator")
def test_mocked_gen_is_not_global(self, mock_copy, mock_get_global_rng):
gen1 = iarandom.convert_seed_to_generator(1)
gen2 = iarandom.convert_seed_to_generator(2)
mock_copy.return_value = "foo"
mock_get_global_rng.return_value = iarandom.RNG(gen2)
result = iarandom.copy_generator_unless_global_generator(gen1)
assert mock_get_global_rng.call_count == 1
mock_copy.assert_called_once_with(gen1)
assert result == "foo"
class Test_reset_generator_cache_(_Base):
@mock.patch("imgaug.random._reset_generator_cache_np117_")
@mock.patch("imgaug.random._reset_generator_cache_np116_")
def test_mocked_call(self, mock_np116, mock_np117):
mock_np116.return_value = "np116"
mock_np117.return_value = "np117"
gen = iarandom.convert_seed_to_generator(1)
result = iarandom.reset_generator_cache_(gen)
if IS_NP_117_OR_HIGHER:
assert result == "np117"
mock_np117.assert_called_once_with(gen)
assert mock_np116.call_count == 0
else:
assert result == "np116"
mock_np116.assert_called_once_with(gen)
assert mock_np117.call_count == 0
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_call_np117(self):
gen = iarandom.convert_seed_to_generator(1)
gen_without_cache_copy = copylib.deepcopy(gen)
state = iarandom._get_generator_state_np117(gen)
state["has_uint32"] = 1
gen_with_cache = copylib.deepcopy(gen)
iarandom.set_generator_state_(gen_with_cache, state)
gen_with_cache_copy = copylib.deepcopy(gen_with_cache)
gen_cache_reset = iarandom.reset_generator_cache_(gen_with_cache)
assert iarandom.is_generator_equal_to(gen_cache_reset,
gen_without_cache_copy)
assert not iarandom.is_generator_equal_to(gen_cache_reset,
gen_with_cache_copy)
def test_call_np116(self):
gen = np.random.RandomState(1)
gen_without_cache_copy = copylib.deepcopy(gen)
state = iarandom._get_generator_state_np116(gen)
state = list(state)
state[-2] = 1
gen_with_cache = copylib.deepcopy(gen)
iarandom.set_generator_state_(gen_with_cache, tuple(state))
gen_with_cache_copy = copylib.deepcopy(gen_with_cache)
gen_cache_reset = iarandom.reset_generator_cache_(gen_with_cache)
assert iarandom.is_generator_equal_to(gen_cache_reset,
gen_without_cache_copy)
assert not iarandom.is_generator_equal_to(gen_cache_reset,
gen_with_cache_copy)
class Test_derive_generator_(_Base):
@mock.patch("imgaug.random.derive_generators_")
def test_mocked_call(self, mock_derive_gens):
mock_derive_gens.return_value = ["foo"]
gen = iarandom.convert_seed_to_generator(1)
gen_derived = iarandom.derive_generator_(gen)
mock_derive_gens.assert_called_once_with(gen, n=1)
assert gen_derived == "foo"
def test_integration(self):
gen = iarandom.convert_seed_to_generator(1)
gen_copy = copylib.deepcopy(gen)
gen_derived = iarandom.derive_generator_(gen)
assert not iarandom.is_generator_equal_to(gen_derived, gen_copy)
# should have advanced the state
assert not iarandom.is_generator_equal_to(gen_copy, gen)
class Test_derive_generators_(_Base):
@mock.patch("imgaug.random._derive_generators_np117_")
@mock.patch("imgaug.random._derive_generators_np116_")
def test_mocked_call(self, mock_np116, mock_np117):
mock_np116.return_value = "np116"
mock_np117.return_value = "np117"
gen = iarandom.convert_seed_to_generator(1)
result = iarandom.derive_generators_(gen, 1)
if isinstance(gen, np.random.RandomState):
assert result == "np116"
mock_np116.assert_called_once_with(gen, n=1)
assert mock_np117.call_count == 0
else:
assert result == "np117"
mock_np117.assert_called_once_with(gen, n=1)
assert mock_np116.call_count == 0
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_call_np117(self):
gen = iarandom.convert_seed_to_generator(1)
gen_copy = copylib.deepcopy(gen)
result = iarandom.derive_generators_(gen, 2)
assert len(result) == 2
assert np.all([isinstance(gen, np.random.Generator)
for gen in result])
assert not iarandom.is_generator_equal_to(result[0], gen_copy)
assert not iarandom.is_generator_equal_to(result[1], gen_copy)
assert not iarandom.is_generator_equal_to(result[0], result[1])
# derive should advance state
assert not iarandom.is_generator_equal_to(gen, gen_copy)
def test_call_np116(self):
gen = np.random.RandomState(1)
gen_copy = copylib.deepcopy(gen)
result = iarandom.derive_generators_(gen, 2)
assert len(result) == 2
assert np.all([isinstance(gen, np.random.RandomState)
for gen in result])
assert not iarandom.is_generator_equal_to(result[0], gen_copy)
assert not iarandom.is_generator_equal_to(result[1], gen_copy)
assert not iarandom.is_generator_equal_to(result[0], result[1])
# derive should advance state
assert not iarandom.is_generator_equal_to(gen, gen_copy)
class Test_get_generator_state(_Base):
@mock.patch("imgaug.random._get_generator_state_np117")
@mock.patch("imgaug.random._get_generator_state_np116")
def test_mocked_call(self, mock_np116, mock_np117):
mock_np116.return_value = "np116"
mock_np117.return_value = "np117"
gen = iarandom.convert_seed_to_generator(1)
result = iarandom.get_generator_state(gen)
if isinstance(gen, np.random.RandomState):
assert result == "np116"
mock_np116.assert_called_once_with(gen)
assert mock_np117.call_count == 0
else:
assert result == "np117"
mock_np117.assert_called_once_with(gen)
assert mock_np116.call_count == 0
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_call_np117(self):
gen = iarandom.convert_seed_to_generator(1)
state = iarandom.get_generator_state(gen)
assert str(state) == str(gen.bit_generator.state)
def test_call_np116(self):
gen = np.random.RandomState(1)
state = iarandom.get_generator_state(gen)
assert str(state) == str(gen.get_state())
class Test_set_generator_state_(_Base):
@mock.patch("imgaug.random._set_generator_state_np117_")
@mock.patch("imgaug.random._set_generator_state_np116_")
def test_mocked_call(self, mock_np116, mock_np117):
gen = iarandom.convert_seed_to_generator(1)
state = {"state": 0}
iarandom.set_generator_state_(gen, state)
if isinstance(gen, np.random.RandomState):
mock_np116.assert_called_once_with(gen, state)
assert mock_np117.call_count == 0
else:
mock_np117.assert_called_once_with(gen, state)
assert mock_np116.call_count == 0
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_call_np117(self):
gen1 = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen2 = np.random.Generator(iarandom.BIT_GENERATOR(2))
gen1_copy = copylib.deepcopy(gen1)
gen2_copy = copylib.deepcopy(gen2)
iarandom._set_generator_state_np117_(
gen2, iarandom.get_generator_state(gen1))
assert iarandom.is_generator_equal_to(gen2, gen1)
assert iarandom.is_generator_equal_to(gen1, gen1_copy)
assert not iarandom.is_generator_equal_to(gen2, gen2_copy)
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_call_np117_via_samples(self):
gen1 = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen2 = np.random.Generator(iarandom.BIT_GENERATOR(2))
gen1_copy = copylib.deepcopy(gen1)
gen2_copy = copylib.deepcopy(gen2)
iarandom._set_generator_state_np117_(
gen2, iarandom.get_generator_state(gen1))
samples1 = gen1.random(size=(100,))
samples2 = gen2.random(size=(100,))
samples1_copy = gen1_copy.random(size=(100,))
samples2_copy = gen2_copy.random(size=(100,))
assert np.allclose(samples1, samples2)
assert np.allclose(samples1, samples1_copy)
assert not np.allclose(samples2, samples2_copy)
def test_call_np116(self):
gen1 = np.random.RandomState(1)
gen2 = np.random.RandomState(2)
gen1_copy = copylib.deepcopy(gen1)
gen2_copy = copylib.deepcopy(gen2)
iarandom._set_generator_state_np116_(
gen2, iarandom.get_generator_state(gen1))
assert iarandom.is_generator_equal_to(gen2, gen1)
assert iarandom.is_generator_equal_to(gen1, gen1_copy)
assert not iarandom.is_generator_equal_to(gen2, gen2_copy)
def test_call_np116_via_samples(self):
gen1 = np.random.RandomState(1)
gen2 = np.random.RandomState(2)
gen1_copy = copylib.deepcopy(gen1)
gen2_copy = copylib.deepcopy(gen2)
iarandom._set_generator_state_np116_(
gen2, iarandom.get_generator_state(gen1))
samples1 = gen1.uniform(0.0, 1.0, size=(100,))
samples2 = gen2.uniform(0.0, 1.0, size=(100,))
samples1_copy = gen1_copy.uniform(0.0, 1.0, size=(100,))
samples2_copy = gen2_copy.uniform(0.0, 1.0, size=(100,))
assert np.allclose(samples1, samples2)
assert np.allclose(samples1, samples1_copy)
assert not np.allclose(samples2, samples2_copy)
class Test_is_generator_equal_to(_Base):
@mock.patch("imgaug.random._is_generator_equal_to_np117")
@mock.patch("imgaug.random._is_generator_equal_to_np116")
def test_mocked_call(self, mock_np116, mock_np117):
mock_np116.return_value = "np116"
mock_np117.return_value = "np117"
gen = iarandom.convert_seed_to_generator(1)
result = iarandom.is_generator_equal_to(gen, gen)
if isinstance(gen, np.random.RandomState):
assert result == "np116"
mock_np116.assert_called_once_with(gen, gen)
assert mock_np117.call_count == 0
else:
assert result == "np117"
mock_np117.assert_called_once_with(gen, gen)
assert mock_np116.call_count == 0
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_generator_is_identical_np117(self):
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
result = iarandom._is_generator_equal_to_np117(gen, gen)
assert result is True
def test_generator_is_identical_np116(self):
gen = np.random.RandomState(1)
result = iarandom._is_generator_equal_to_np116(gen, gen)
assert result is True
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_generator_created_with_same_seed_np117(self):
gen1 = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen2 = np.random.Generator(iarandom.BIT_GENERATOR(1))
result = iarandom._is_generator_equal_to_np117(gen1, gen2)
assert result is True
def test_generator_created_with_same_seed_np116(self):
gen1 = np.random.RandomState(1)
gen2 = np.random.RandomState(1)
result = iarandom._is_generator_equal_to_np116(gen1, gen2)
assert result is True
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_generator_is_copy_of_itself_np117(self):
gen1 = np.random.Generator(iarandom.BIT_GENERATOR(1))
result = iarandom._is_generator_equal_to_np117(gen1,
copylib.deepcopy(gen1))
assert result is True
def test_generator_is_copy_of_itself_np116(self):
gen1 = np.random.RandomState(1)
result = iarandom._is_generator_equal_to_np116(gen1,
copylib.deepcopy(gen1))
assert result is True
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_generator_created_with_different_seed_np117(self):
gen1 = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen2 = np.random.Generator(iarandom.BIT_GENERATOR(2))
result = iarandom._is_generator_equal_to_np117(gen1, gen2)
assert result is False
def test_generator_created_with_different_seed_np116(self):
gen1 = np.random.RandomState(1)
gen2 = np.random.RandomState(2)
result = iarandom._is_generator_equal_to_np116(gen1, gen2)
assert result is False
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_generator_modified_to_have_same_state_np117(self):
gen1 = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen2 = np.random.Generator(iarandom.BIT_GENERATOR(2))
iarandom.set_generator_state_(gen2, iarandom.get_generator_state(gen1))
result = iarandom._is_generator_equal_to_np117(gen1, gen2)
assert result is True
def test_generator_modified_to_have_same_state_np116(self):
gen1 = np.random.RandomState(1)
gen2 = np.random.RandomState(2)
iarandom.set_generator_state_(gen2, iarandom.get_generator_state(gen1))
result = iarandom._is_generator_equal_to_np116(gen1, gen2)
assert result is True
class Test_advance_generator_(_Base):
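    # advance_generator_() modifies the generator state in-place; copies taken
    # before and after each advance must therefore compare as unequal.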
@mock.patch("imgaug.random._advance_generator_np117_")
@mock.patch("imgaug.random._advance_generator_np116_")
def test_mocked_call(self, mock_np116, mock_np117):
gen = iarandom.convert_seed_to_generator(1)
iarandom.advance_generator_(gen)
if isinstance(gen, np.random.RandomState):
mock_np116.assert_called_once_with(gen)
assert mock_np117.call_count == 0
else:
mock_np117.assert_called_once_with(gen)
assert mock_np116.call_count == 0
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_call_np117(self):
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen_copy1 = copylib.deepcopy(gen)
iarandom._advance_generator_np117_(gen)
gen_copy2 = copylib.deepcopy(gen)
iarandom._advance_generator_np117_(gen)
assert iarandom.is_generator_equal_to(gen, copylib.deepcopy(gen))
assert not iarandom.is_generator_equal_to(gen_copy1, gen_copy2)
assert not iarandom.is_generator_equal_to(gen_copy2, gen)
assert not iarandom.is_generator_equal_to(gen_copy1, gen)
def test_call_np116(self):
gen = np.random.RandomState(1)
gen_copy1 = copylib.deepcopy(gen)
iarandom._advance_generator_np116_(gen)
gen_copy2 = copylib.deepcopy(gen)
iarandom._advance_generator_np116_(gen)
assert iarandom.is_generator_equal_to(gen, copylib.deepcopy(gen))
assert not iarandom.is_generator_equal_to(gen_copy1, gen_copy2)
assert not iarandom.is_generator_equal_to(gen_copy2, gen)
assert not iarandom.is_generator_equal_to(gen_copy1, gen)
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_samples_different_after_advance_np117(self):
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
gen_copy1 = copylib.deepcopy(gen)
# first advance
iarandom._advance_generator_np117_(gen)
gen_copy2 = copylib.deepcopy(gen)
# second advance
iarandom._advance_generator_np117_(gen)
sample_before = gen_copy1.uniform(0.0, 1.0)
sample_after = gen_copy2.uniform(0.0, 1.0)
sample_after_after = gen.uniform(0.0, 1.0)
assert not np.isclose(sample_after, sample_before, rtol=0)
assert not np.isclose(sample_after_after, sample_after, rtol=0)
assert not np.isclose(sample_after_after, sample_before, rtol=0)
def test_samples_different_after_advance_np116(self):
gen = np.random.RandomState(1)
gen_copy1 = copylib.deepcopy(gen)
# first advance
iarandom._advance_generator_np116_(gen)
gen_copy2 = copylib.deepcopy(gen)
# second advance
iarandom._advance_generator_np116_(gen)
sample_before = gen_copy1.uniform(0.0, 1.0)
sample_after = gen_copy2.uniform(0.0, 1.0)
sample_after_after = gen.uniform(0.0, 1.0)
assert not np.isclose(sample_after, sample_before, rtol=0)
assert not np.isclose(sample_after_after, sample_after, rtol=0)
assert not np.isclose(sample_after_after, sample_before, rtol=0)
class Test_polyfill_integers(_Base):
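    # polyfill_integers() is expected to delegate to integers() on new-style
    # numpy generators and to randint() on legacy RandomState objects.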
def test_mocked_standard_call_np116(self):
def side_effect(low, high=None, size=None, dtype='l'):
return "np116"
gen = mock.MagicMock()
gen.randint.side_effect = side_effect
result = iarandom.polyfill_integers(gen, 2, 2000, size=(10,),
dtype="int8")
assert result == "np116"
gen.randint.assert_called_once_with(low=2, high=2000, size=(10,),
dtype="int8")
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_mocked_standard_call_np117(self):
def side_effect(low, high=None, size=None, dtype='int64',
endpoint=False):
return "np117"
gen = mock.MagicMock()
gen.integers.side_effect = side_effect
del gen.randint
result = iarandom.polyfill_integers(gen, 2, 2000, size=(10,),
dtype="int8", endpoint=True)
assert result == "np117"
gen.integers.assert_called_once_with(low=2, high=2000, size=(10,),
dtype="int8", endpoint=True)
def test_mocked_call_with_default_values_np116(self):
def side_effect(low, high=None, size=None, dtype='l'):
return "np116"
gen = mock.MagicMock()
gen.randint.side_effect = side_effect
result = iarandom.polyfill_integers(gen, 2)
assert result == "np116"
gen.randint.assert_called_once_with(low=2, high=None, size=None,
dtype="int32")
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_mocked_call_with_default_values_np117(self):
def side_effect(low, high=None, size=None, dtype='int64',
endpoint=False):
return "np117"
gen = mock.MagicMock()
gen.integers.side_effect = side_effect
del gen.randint
result = iarandom.polyfill_integers(gen, 2)
assert result == "np117"
gen.integers.assert_called_once_with(low=2, high=None, size=None,
dtype="int32", endpoint=False)
def test_mocked_call_with_default_values_and_endpoint_np116(self):
def side_effect(low, high=None, size=None, dtype='l'):
return "np116"
gen = mock.MagicMock()
gen.randint.side_effect = side_effect
result = iarandom.polyfill_integers(gen, 2, endpoint=True)
assert result == "np116"
gen.randint.assert_called_once_with(low=0, high=3, size=None,
dtype="int32")
def test_mocked_call_with_low_high_and_endpoint_np116(self):
def side_effect(low, high=None, size=None, dtype='l'):
return "np116"
gen = mock.MagicMock()
gen.randint.side_effect = side_effect
result = iarandom.polyfill_integers(gen, 2, 5, endpoint=True)
assert result == "np116"
gen.randint.assert_called_once_with(low=2, high=6, size=None,
dtype="int32")
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_sampled_values_np117(self):
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
result = iarandom.polyfill_integers(gen, 1, 10, size=(1000,),
endpoint=False)
assert result.dtype.name == "int32"
assert np.all(result >= 1)
assert np.all(result < 10)
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_sampled_values_with_endpoint_np117(self):
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
result = iarandom.polyfill_integers(gen, 1, 10, size=(1000,),
endpoint=True)
assert result.dtype.name == "int32"
assert np.all(result >= 1)
assert np.all(result <= 10)
def test_sampled_values_np116(self):
gen = np.random.RandomState(1)
result = iarandom.polyfill_integers(gen, 1, 10, size=(1000,),
endpoint=False)
assert result.dtype.name == "int32"
assert np.all(result >= 1)
assert np.all(result < 10)
def test_sampled_values_with_endpoint_np116(self):
gen = np.random.RandomState(1)
result = iarandom.polyfill_integers(gen, 1, 10, size=(1000,),
endpoint=True)
assert result.dtype.name == "int32"
assert np.all(result >= 1)
assert np.all(result <= 10)
class Test_polyfill_random(_Base):
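    # polyfill_random() is expected to delegate to random() on new-style numpy
    # generators and to random_sample() on legacy RandomState objects.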
def test_mocked_standard_call_np116(self):
def side_effect(size=None):
return np.zeros((1,), dtype="float64")
gen = mock.MagicMock()
gen.random_sample.side_effect = side_effect
result = iarandom.polyfill_random(gen, size=(10,), dtype="float16")
assert result.dtype.name == "float16"
gen.random_sample.assert_called_once_with(
size=(10,))
def test_mocked_standard_call_np117(self):
def side_effect(size=None, dtype='d', out=None):
return "np117"
gen = mock.MagicMock()
gen.random.side_effect = side_effect
del gen.random_sample
result = iarandom.polyfill_random(gen, size=(10,), dtype="float16")
assert result == "np117"
gen.random.assert_called_once_with(
size=(10,), dtype="float16", out=None)
def test_mocked_call_with_out_arg_np116(self):
def side_effect(size=None):
return np.zeros((1,), dtype="float64")
gen = mock.MagicMock()
gen.random_sample.side_effect = side_effect
out = np.empty((10,), dtype="float16")
result = iarandom.polyfill_random(gen, size=(10,), dtype="float16",
out=out)
assert result.dtype.name == "float16"
# np1.16 does not support an out arg, hence it is not part of the
# expected call
gen.random_sample.assert_called_once_with(size=(10,))
def test_mocked_call_with_out_arg_np117(self):
def side_effect(size=None, dtype='d', out=None):
return "np117"
gen = mock.MagicMock()
gen.random.side_effect = side_effect
del gen.random_sample
out = np.empty((10,), dtype="float16")
result = iarandom.polyfill_random(gen, size=(10,), dtype="float16",
out=out)
assert result == "np117"
gen.random.assert_called_once_with(size=(10,), dtype="float16",
out=out)
def test_sampled_values_np116(self):
gen = np.random.RandomState(1)
result = iarandom.polyfill_random(gen, size=(1000,))
assert result.dtype.name == "float32"
assert np.all(result >= 0)
assert np.all(result < 1.0)
@unittest.skipIf(not IS_NP_117_OR_HIGHER,
"Function uses classes from numpy 1.17+")
def test_sampled_values_np117(self):
gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
result = iarandom.polyfill_random(gen, size=(1000,))
assert result.dtype.name == "float32"
assert np.all(result >= 0)
assert np.all(result < 1.0)
def test_sampled_values_with_out_arg_np116(self):
gen = np.random.RandomState(1)
out = np.zeros((1000,), dtype="float32")
result = iarandom.polyfill_random(gen, size=(1000,), out=out)
assert result.dtype.name == "float32"
assert np.all(result >= 0)
        assert np.all(result < 1.0)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import scipy.sparse
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def compare_single_input_op_to_numpy(keras_op,
np_op,
input_shape,
dtype='float32',
negative_values=True,
keras_args=None,
keras_kwargs=None,
np_args=None,
np_kwargs=None):
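  """Checks a unary Keras backend op against a NumPy reference implementation.

  Random inputs are fed through both `keras_op` and `np_op`; the two outputs
  must match within a small absolute tolerance.
  """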
keras_args = keras_args or []
keras_kwargs = keras_kwargs or {}
np_args = np_args or []
np_kwargs = np_kwargs or {}
inputs = 2. * np.random.random(input_shape)
if negative_values:
inputs -= 1.
keras_output = keras_op(keras.backend.variable(inputs, dtype=dtype),
*keras_args, **keras_kwargs)
keras_output = keras.backend.eval(keras_output)
np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs)
try:
np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
except AssertionError:
raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '
'Expected ' + str(np_output) + ' but got ' +
str(keras_output))
def compare_two_inputs_op_to_numpy(keras_op,
np_op,
input_shape_a,
input_shape_b,
dtype='float32',
keras_args=None,
keras_kwargs=None,
np_args=None,
np_kwargs=None):
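  """Checks a binary Keras backend op against a NumPy reference implementation.

  Two random inputs are fed through both `keras_op` and `np_op`; the two
  outputs must match within a small absolute tolerance.
  """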
keras_args = keras_args or []
keras_kwargs = keras_kwargs or {}
np_args = np_args or []
np_kwargs = np_kwargs or {}
input_a = np.random.random(input_shape_a)
input_b = np.random.random(input_shape_b)
keras_output = keras_op(keras.backend.variable(input_a, dtype=dtype),
keras.backend.variable(input_b, dtype=dtype),
*keras_args, **keras_kwargs)
keras_output = keras.backend.eval(keras_output)
np_output = np_op(input_a.astype(dtype), input_b.astype(dtype),
*np_args, **np_kwargs)
try:
np.testing.assert_allclose(keras_output, np_output, atol=1e-4)
except AssertionError:
raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '
'Expected ' + str(np_output) + ' but got ' +
str(keras_output))
@test_util.run_all_in_graph_and_eager_modes
class BackendUtilsTest(test.TestCase):
def test_backend(self):
self.assertEqual(keras.backend.backend(), 'tensorflow')
  def test_epsilon(self):
epsilon = 1e-2
keras.backend.set_epsilon(epsilon)
self.assertEqual(keras.backend.epsilon(), epsilon)
keras.backend.set_epsilon(1e-7)
def test_floatx(self):
floatx = 'float64'
keras.backend.set_floatx(floatx)
self.assertEqual(keras.backend.floatx(), floatx)
keras.backend.set_floatx('float32')
def test_image_data_format(self):
image_data_format = 'channels_first'
keras.backend.set_image_data_format(image_data_format)
self.assertEqual(keras.backend.image_data_format(), image_data_format)
keras.backend.set_image_data_format('channels_last')
def test_get_reset_uids(self):
self.assertEqual(keras.backend.get_uid('foo'), 1)
self.assertEqual(keras.backend.get_uid('foo'), 2)
keras.backend.reset_uids()
self.assertEqual(keras.backend.get_uid('foo'), 1)
def test_learning_phase(self):
with self.cached_session() as sess:
keras.backend.set_learning_phase(1)
self.assertEqual(keras.backend.learning_phase(), 1)
with self.assertRaises(ValueError):
keras.backend.set_learning_phase(2)
# Test running with a learning-phase-consuming layer
keras.backend.set_learning_phase(0)
x = keras.Input((3,))
y = keras.layers.BatchNormalization()(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
sess.run(y, feed_dict={x: np.random.random((2, 3))})
def test_learning_phase_scope(self):
with self.cached_session():
initial_learning_phase = keras.backend.learning_phase()
with keras.backend.learning_phase_scope(1) as lp:
self.assertEqual(lp, 1)
self.assertEqual(keras.backend.learning_phase(), 1)
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
with keras.backend.learning_phase_scope(0) as lp:
self.assertEqual(lp, 0)
self.assertEqual(keras.backend.learning_phase(), 0)
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
with self.assertRaises(ValueError):
with keras.backend.learning_phase_scope(None):
pass
self.assertEqual(keras.backend.learning_phase(), initial_learning_phase)
def test_int_shape(self):
x = keras.backend.ones(shape=(3, 4))
self.assertEqual(keras.backend.int_shape(x), (3, 4))
if not context.executing_eagerly():
x = keras.backend.placeholder(shape=(None, 4))
self.assertEqual(keras.backend.int_shape(x), (None, 4))
def test_in_train_phase(self):
with self.cached_session():
y1 = keras.backend.variable(1)
y2 = keras.backend.variable(2)
if context.executing_eagerly():
with keras.backend.learning_phase_scope(0):
y_val_test = keras.backend.in_train_phase(y1, y2).numpy()
with keras.backend.learning_phase_scope(1):
y_val_train = keras.backend.in_train_phase(y1, y2).numpy()
else:
y = keras.backend.in_train_phase(y1, y2)
f = keras.backend.function([keras.backend.learning_phase()], [y])
y_val_test = f([0])[0]
y_val_train = f([1])[0]
self.assertAllClose(y_val_test, 2)
self.assertAllClose(y_val_train, 1)
def test_is_keras_tensor(self):
x = keras.backend.variable(1)
self.assertEqual(keras.backend.is_keras_tensor(x), False)
x = keras.Input(shape=(1,))
self.assertEqual(keras.backend.is_keras_tensor(x), True)
with self.assertRaises(ValueError):
keras.backend.is_keras_tensor(0)
def test_stop_gradient(self):
x = keras.backend.variable(1)
y = keras.backend.stop_gradient(x)
if not context.executing_eagerly():
self.assertEqual(y.op.name[:12], 'StopGradient')
xs = [keras.backend.variable(1) for _ in range(3)]
ys = keras.backend.stop_gradient(xs)
if not context.executing_eagerly():
for y in ys:
self.assertEqual(y.op.name[:12], 'StopGradient')
@test_util.run_all_in_graph_and_eager_modes
class BackendVariableTest(test.TestCase):
def test_zeros(self):
with self.cached_session():
x = keras.backend.zeros((3, 4))
val = keras.backend.eval(x)
self.assertAllClose(val, np.zeros((3, 4)))
def test_ones(self):
with self.cached_session():
x = keras.backend.ones((3, 4))
val = keras.backend.eval(x)
self.assertAllClose(val, np.ones((3, 4)))
def test_eye(self):
with self.cached_session():
x = keras.backend.eye(4)
val = keras.backend.eval(x)
self.assertAllClose(val, np.eye(4))
def test_zeros_like(self):
with self.cached_session():
x = keras.backend.zeros((3, 4))
y = keras.backend.zeros_like(x)
val = keras.backend.eval(y)
self.assertAllClose(val, np.zeros((3, 4)))
def test_ones_like(self):
with self.cached_session():
x = keras.backend.zeros((3, 4))
y = keras.backend.ones_like(x)
val = keras.backend.eval(y)
self.assertAllClose(val, np.ones((3, 4)))
def test_random_uniform_variable(self):
with self.cached_session():
x = keras.backend.random_uniform_variable((30, 20), low=1, high=2, seed=0)
val = keras.backend.eval(x)
self.assertAllClose(val.mean(), 1.5, atol=1e-1)
self.assertAllClose(val.max(), 2., atol=1e-1)
self.assertAllClose(val.min(), 1., atol=1e-1)
def test_random_normal_variable(self):
with self.cached_session():
x = keras.backend.random_normal_variable((30, 20), 1., 0.5,
seed=0)
val = keras.backend.eval(x)
self.assertAllClose(val.mean(), 1., atol=1e-1)
self.assertAllClose(val.std(), 0.5, atol=1e-1)
def test_count_params(self):
with self.cached_session():
x = keras.backend.zeros((4, 5))
val = keras.backend.count_params(x)
self.assertAllClose(val, 20)
def test_constant(self):
with self.cached_session():
ref_val = np.random.random((3, 4)).astype('float32')
x = keras.backend.constant(ref_val)
val = keras.backend.eval(x)
self.assertAllClose(val, ref_val)
def test_sparse_variable(self):
with self.cached_session():
val = scipy.sparse.eye(10)
x = keras.backend.variable(val)
self.assertTrue(isinstance(x, sparse_tensor.SparseTensor))
y = keras.backend.to_dense(x)
self.assertFalse(keras.backend.is_sparse(y))
@test_util.run_all_in_graph_and_eager_modes
class BackendLinearAlgebraTest(test.TestCase):
def test_dot(self):
x = keras.backend.ones(shape=(2, 3))
y = keras.backend.ones(shape=(3, 4))
xy = keras.backend.dot(x, y)
self.assertEqual(xy.get_shape().as_list(), [2, 4])
x = keras.backend.ones(shape=(32, 28, 3))
y = keras.backend.ones(shape=(3, 4))
xy = keras.backend.dot(x, y)
self.assertEqual(xy.get_shape().as_list(), [32, 28, 4])
def test_batch_dot(self):
x = keras.backend.ones(shape=(32, 20, 1))
y = keras.backend.ones(shape=(32, 30, 20))
xy = keras.backend.batch_dot(x, y, axes=[1, 2])
self.assertEqual(xy.get_shape().as_list(), [32, 1, 30])
# TODO(fchollet): insufficiently tested.
def test_reduction_ops(self):
ops_to_test = [
(keras.backend.max, np.max),
(keras.backend.min, np.min),
(keras.backend.sum, np.sum),
(keras.backend.prod, np.prod),
(keras.backend.var, np.var),
(keras.backend.std, np.std),
(keras.backend.mean, np.mean),
(keras.backend.argmin, np.argmin),
(keras.backend.argmax, np.argmax),
]
for keras_op, np_op in ops_to_test:
with self.cached_session():
compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
keras_kwargs={'axis': 1},
np_kwargs={'axis': 1})
compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),
keras_kwargs={'axis': -1},
np_kwargs={'axis': -1})
if 'keepdims' in tf_inspect.getargspec(keras_op).args:
compare_single_input_op_to_numpy(keras_op, np_op,
input_shape=(4, 7, 5),
keras_kwargs={'axis': 1,
'keepdims': True},
np_kwargs={'axis': 1,
'keepdims': True})
def test_elementwise_ops(self):
ops_to_test = [
(keras.backend.square, np.square),
(keras.backend.abs, np.abs),
(keras.backend.round, np.round),
(keras.backend.sign, np.sign),
(keras.backend.sin, np.sin),
(keras.backend.cos, np.cos),
(keras.backend.exp, np.exp),
]
for keras_op, np_op in ops_to_test:
with self.cached_session():
compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7))
ops_to_test = [
(keras.backend.sqrt, np.sqrt),
(keras.backend.log, np.log),
]
for keras_op, np_op in ops_to_test:
with self.cached_session():
compare_single_input_op_to_numpy(keras_op, np_op,
input_shape=(4, 7),
negative_values=False)
with self.cached_session():
compare_single_input_op_to_numpy(
keras.backend.clip, np.clip,
input_shape=(6, 4),
keras_kwargs={'min_value': 0.1, 'max_value': 2.4},
          np_kwargs={'a_min': 0.1, 'a_max': 2.4})
with self.cached_session():
compare_single_input_op_to_numpy(
keras.backend.pow, np.power,
input_shape=(6, 4),
keras_args=[3],
np_args=[3])
def test_two_tensor_ops(self):
ops_to_test = [
(keras.backend.equal, np.equal),
(keras.backend.not_equal, np.not_equal),
(keras.backend.greater, np.greater),
(keras.backend.greater_equal, np.greater_equal),
(keras.backend.less, np.less),
(keras.backend.less_equal, np.less_equal),
(keras.backend.maximum, np.maximum),
(keras.backend.minimum, np.minimum),
]
for keras_op, np_op in ops_to_test:
with self.cached_session():
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 7),
input_shape_b=(4, 7))
def test_relu(self):
x = ops.convert_to_tensor([[-4, 0], [2, 7]], 'float32')
with self.cached_session():
# standard relu
relu_op = keras.backend.relu(x)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
# alpha (leaky relu used)
relu_op = keras.backend.relu(x, alpha=0.5)
if not context.executing_eagerly():
self.assertTrue('LeakyRelu' in relu_op.name)
self.assertAllClose(keras.backend.eval(relu_op), [[-2, 0], [2, 7]])
# max_value < some elements
relu_op = keras.backend.relu(x, max_value=5)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 5]])
# nn.relu6 used
relu_op = keras.backend.relu(x, max_value=6)
if not context.executing_eagerly():
self.assertTrue('Relu6' in relu_op.name) # uses tf.nn.relu6
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 6]])
# max value > 6
relu_op = keras.backend.relu(x, max_value=10)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
# max value is float
relu_op = keras.backend.relu(x, max_value=4.3)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 4.3]])
# max value == 0
relu_op = keras.backend.relu(x, max_value=0)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 0]])
# alpha and max_value
relu_op = keras.backend.relu(x, alpha=0.25, max_value=3)
self.assertAllClose(keras.backend.eval(relu_op), [[-1, 0], [2, 3]])
# threshold
relu_op = keras.backend.relu(x, threshold=3)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 7]])
# threshold is float
relu_op = keras.backend.relu(x, threshold=1.5)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
# threshold is negative
relu_op = keras.backend.relu(x, threshold=-5)
self.assertAllClose(keras.backend.eval(relu_op), [[-4, 0], [2, 7]])
# threshold and max_value
relu_op = keras.backend.relu(x, threshold=3, max_value=5)
self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 5]])
# threshold and alpha
relu_op = keras.backend.relu(x, alpha=0.25, threshold=4)
self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 7]])
# threshold, alpha, and max_value
relu_op = keras.backend.relu(x, alpha=0.25, threshold=4, max_value=5)
self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 5]])
@test_util.run_all_in_graph_and_eager_modes
class BackendShapeOpsTest(test.TestCase):
def test_reshape(self):
with self.cached_session():
compare_single_input_op_to_numpy(keras.backend.reshape, np.reshape,
input_shape=(4, 7),
keras_args=[(2, 14)],
np_args=[(2, 14)])
def test_concatenate(self):
a = keras.backend.variable(np.ones((1, 2, 3)))
b = keras.backend.variable(np.ones((1, 2, 2)))
y = keras.backend.concatenate([a, b], axis=-1)
self.assertEqual(y.get_shape().as_list(), [1, 2, 5])
def test_permute_dimensions(self):
with self.cached_session():
compare_single_input_op_to_numpy(keras.backend.permute_dimensions,
np.transpose,
input_shape=(4, 7),
keras_args=[(1, 0)],
np_args=[(1, 0)])
def test_resize_images(self):
height_factor = 2
width_factor = 2
data_format = 'channels_last'
x = keras.backend.variable(np.ones((1, 2, 2, 3)))
y = keras.backend.resize_images(x,
height_factor,
width_factor,
data_format)
self.assertEqual(y.get_shape().as_list(), [1, 4, 4, 3])
data_format = 'channels_first'
x = keras.backend.variable(np.ones((1, 3, 2, 2)))
y = keras.backend.resize_images(x,
height_factor,
width_factor,
data_format)
self.assertEqual(y.get_shape().as_list(), [1, 3, 4, 4])
# Invalid use:
with self.assertRaises(ValueError):
keras.backend.resize_images(x,
height_factor,
width_factor,
data_format='unknown')
def test_resize_volumes(self):
height_factor = 2
width_factor = 2
depth_factor = 2
data_format = 'channels_last'
x = keras.backend.variable(np.ones((1, 2, 2, 2, 3)))
y = keras.backend.resize_volumes(x,
depth_factor,
height_factor,
width_factor,
data_format)
self.assertEqual(y.get_shape().as_list(), [1, 4, 4, 4, 3])
data_format = 'channels_first'
x = keras.backend.variable(np.ones((1, 3, 2, 2, 2)))
y = keras.backend.resize_volumes(x,
depth_factor,
height_factor,
width_factor,
data_format)
self.assertEqual(y.get_shape().as_list(), [1, 3, 4, 4, 4])
# Invalid use:
with self.assertRaises(ValueError):
keras.backend.resize_volumes(x,
depth_factor,
height_factor,
width_factor,
data_format='unknown')
def test_repeat_elements(self):
x = keras.backend.variable(np.ones((1, 3, 2)))
y = keras.backend.repeat_elements(x, 3, axis=1)
self.assertEqual(y.get_shape().as_list(), [1, 9, 2])
# Use with a dynamic axis:
if not context.executing_eagerly():
x = keras.backend.placeholder(shape=(2, None, 2))
y = keras.backend.repeat_elements(x, 3, axis=1)
self.assertEqual(y.get_shape().as_list(), [2, None, 2])
def test_repeat(self):
x = keras.backend.variable(np.ones((1, 3)))
y = keras.backend.repeat(x, 2)
self.assertEqual(y.get_shape().as_list(), [1, 2, 3])
def test_flatten(self):
with self.cached_session():
compare_single_input_op_to_numpy(keras.backend.flatten,
np.reshape,
input_shape=(4, 7, 6),
np_args=[(4 * 7 * 6,)])
def test_batch_flatten(self):
with self.cached_session():
compare_single_input_op_to_numpy(keras.backend.batch_flatten,
np.reshape,
input_shape=(4, 7, 6),
np_args=[(4, 7 * 6)])
def test_temporal_padding(self):
def ref_op(x, padding):
shape = list(x.shape)
shape[1] += padding[0] + padding[1]
y = np.zeros(tuple(shape))
y[:, padding[0]:-padding[1], :] = x
return y
with self.cached_session():
compare_single_input_op_to_numpy(keras.backend.temporal_padding,
ref_op,
input_shape=(4, 7, 6),
keras_args=[(2, 3)],
np_args=[(2, 3)])
def test_spatial_2d_padding(self):
def ref_op(x, padding, data_format='channels_last'):
shape = list(x.shape)
if data_format == 'channels_last':
shape[1] += padding[0][0] + padding[0][1]
shape[2] += padding[1][0] + padding[1][1]
y = np.zeros(tuple(shape))
y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], :] = x
else:
shape[2] += padding[0][0] + padding[0][1]
shape[3] += padding[1][0] + padding[1][1]
y = np.zeros(tuple(shape))
y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1]] = x
return y
with self.cached_session():
compare_single_input_op_to_numpy(
keras.backend.spatial_2d_padding,
ref_op,
input_shape=(2, 3, 2, 3),
keras_args=[((2, 3), (1, 2))],
keras_kwargs={'data_format': 'channels_last'},
np_args=[((2, 3), (1, 2))],
np_kwargs={'data_format': 'channels_last'})
compare_single_input_op_to_numpy(
keras.backend.spatial_2d_padding,
ref_op,
input_shape=(2, 3, 2, 3),
keras_args=[((2, 3), (1, 2))],
keras_kwargs={'data_format': 'channels_first'},
np_args=[((2, 3), (1, 2))],
np_kwargs={'data_format': 'channels_first'})
def test_spatial_3d_padding(self):
def ref_op(x, padding, data_format='channels_last'):
shape = list(x.shape)
if data_format == 'channels_last':
shape[1] += padding[0][0] + padding[0][1]
shape[2] += padding[1][0] + padding[1][1]
shape[3] += padding[2][0] + padding[2][1]
y = np.zeros(tuple(shape))
y[:,
padding[0][0]:-padding[0][1],
padding[1][0]:-padding[1][1],
padding[2][0]:-padding[2][1],
:] = x
else:
shape[2] += padding[0][0] + padding[0][1]
shape[3] += padding[1][0] + padding[1][1]
shape[4] += padding[2][0] + padding[2][1]
y = np.zeros(tuple(shape))
y[:, :,
padding[0][0]:-padding[0][1],
padding[1][0]:-padding[1][1],
padding[2][0]:-padding[2][1]] = x
return y
with self.cached_session():
compare_single_input_op_to_numpy(
keras.backend.spatial_3d_padding,
ref_op,
input_shape=(2, 3, 2, 3, 2),
keras_args=[((2, 3), (1, 2), (2, 3))],
keras_kwargs={'data_format': 'channels_last'},
np_args=[((2, 3), (1, 2), (2, 3))],
np_kwargs={'data_format': 'channels_last'})
compare_single_input_op_to_numpy(
keras.backend.spatial_3d_padding,
ref_op,
input_shape=(2, 3, 2, 3, 2),
keras_args=[((2, 3), (1, 2), (2, 3))],
keras_kwargs={'data_format': 'channels_first'},
np_args=[((2, 3), (1, 2), (2, 3))],
np_kwargs={'data_format': 'channels_first'})
@test_util.run_all_in_graph_and_eager_modes
class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
def test_bias_add(self):
with self.cached_session():
keras_op = keras.backend.bias_add
np_op = np.add
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 7),
input_shape_b=(7,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 2, 7),
input_shape_b=(7,))
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
x = keras.backend.variable((3, 4))
b = keras.backend.variable((3, 4))
keras.backend.bias_add(x, b)
with self.assertRaises(ValueError):
x = keras.backend.variable((3, 4))
b = keras.backend.variable((4,))
keras.backend.bias_add(x, b, data_format='unknown')
def test_bias_add_channels_first(self):
with self.cached_session():
def keras_op(x, b):
return keras.backend.bias_add(x, b, data_format='channels_first')
def np_op(x, b):
if x.ndim == 3:
b = b.reshape((1, b.shape[0], 1))
if x.ndim == 4:
b = b.reshape((1, b.shape[0], 1, 1))
return x + b
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 7),
input_shape_b=(3,))
compare_two_inputs_op_to_numpy(keras_op, np_op,
input_shape_a=(4, 3, 5, 7),
input_shape_b=(3,))
def test_pool2d(self):
val = np.random.random((10, 3, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_first',
pool_mode='max')
self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9])
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_first',
pool_mode='avg')
self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 9, 9, 3])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 3])
val = np.random.random((10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 3])
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2, 2), strides=(2, 2))
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2, 2))
with self.assertRaises(ValueError):
y = keras.backend.pool2d(x, (2, 2), strides=(2, 2), pool_mode='other')
def test_pool3d(self):
val = np.random.random((10, 3, 10, 10, 10))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_first',
pool_mode='max')
self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9, 9])
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_first',
pool_mode='avg')
self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9, 9])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 9, 9, 9, 3])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 10, 3])
val = np.random.random((10, 10, 10, 10, 3))
x = keras.backend.variable(val)
y = keras.backend.pool3d(x, (2, 2, 2), strides=(2, 2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5, 3])
def test_conv1d(self):
val = np.random.random((10, 4, 10))
x = keras.backend.variable(val)
kernel_val = np.random.random((3, 4, 5))
k = keras.backend.variable(kernel_val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='valid', data_format='channels_first')
self.assertEqual(y.get_shape().as_list(), [10, 5, 8])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='valid', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 8, 5])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(1,),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 10, 5])
val = np.random.random((10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv1d(x, k, strides=(2,),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 5, 5])
def test_local_conv_channels_dim(self):
filters = 3
batch_size = 2
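    # The same local convolution is computed in channels_first and
    # channels_last layouts; the results must agree up to a transpose.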
for input_shape in [(3, 5), (2, 3, 5), (2, 5, 3, 4)]:
channels_in = input_shape[0]
input_spatial_shape = input_shape[1:]
dim = len(input_spatial_shape)
inputs = np.random.normal(0, 1, (batch_size,) + input_shape)
inputs_cf = keras.backend.variable(inputs)
for kernel_size in [1, 2]:
for stride in [1, 2]:
kernel_sizes = (kernel_size,) * dim
strides = (stride,) * dim
output_shape = tuple([(i - kernel_size + stride) // stride
for i in input_spatial_shape])
kernel_shape = (np.prod(output_shape),
np.prod(kernel_sizes) * channels_in,
filters)
kernel = np.random.normal(
0,
1,
output_shape + (channels_in, np.prod(kernel_sizes), filters)
)
kernel_cf = np.reshape(kernel, kernel_shape)
kernel_cf = keras.backend.variable(kernel_cf)
conv_cf = keras.backend.local_conv(inputs_cf,
kernel_cf,
kernel_sizes,
strides,
output_shape,
'channels_first')
inputs_cl = np.transpose(inputs, [0, 2] + list(range(3, dim + 2)) +
[1])
inputs_cl = keras.backend.variable(inputs_cl)
kernel_cl = np.reshape(
np.transpose(kernel, list(range(dim)) + [dim + 1, dim, dim + 2]),
kernel_shape
)
kernel_cl = keras.backend.variable(kernel_cl)
conv_cl = keras.backend.local_conv(inputs_cl,
kernel_cl,
kernel_sizes,
strides,
output_shape,
'channels_last')
with self.cached_session():
conv_cf = keras.backend.eval(conv_cf)
conv_cl = keras.backend.eval(conv_cl)
self.assertAllCloseAccordingToType(
conv_cf,
np.transpose(conv_cl,
[0, dim + 1] + list(range(1, dim + 1))),
atol=1e-5
)
@parameterized.named_parameters(
('local_conv1d', (5, 6), (3,), (1,), (3,)),
('local_conv2d', (4, 5, 6), (3, 3), (1, 1), (2, 3)))
def test_local_conv_1d_and_2d(self,
input_shape,
kernel_sizes,
strides,
output_shape):
filters = 3
batch_size = 2
inputs = np.random.normal(0, 1, (batch_size,) + input_shape)
inputs = keras.backend.variable(inputs)
kernel = np.random.normal(0, 1, (np.prod(output_shape),
np.prod(kernel_sizes) * input_shape[-1],
filters))
kernel = keras.backend.variable(kernel)
local_conv = keras.backend.local_conv(inputs,
kernel,
kernel_sizes,
strides,
output_shape,
'channels_last')
if len(output_shape) == 1:
local_conv_dim = keras.backend.local_conv1d(inputs,
kernel,
kernel_sizes,
strides,
'channels_last')
else:
local_conv_dim = keras.backend.local_conv2d(inputs,
kernel,
kernel_sizes,
strides,
output_shape,
'channels_last')
with self.cached_session():
local_conv = keras.backend.eval(local_conv)
local_conv_dim = keras.backend.eval(local_conv_dim)
self.assertAllCloseAccordingToType(local_conv, local_conv_dim)
def test_conv2d(self):
val = np.random.random((10, 4, 10, 10))
x = keras.backend.variable(val)
kernel_val = np.random.random((3, 3, 4, 5))
k = keras.backend.variable(kernel_val)
y = keras.backend.conv2d(x, k,
padding='valid', data_format='channels_first')
self.assertEqual(y.get_shape().as_list(), [10, 5, 8, 8])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, strides=(1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 8, 8, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, strides=(1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv2d(x, k, strides=(2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5])
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.conv2d(x, k, (2, 2, 2))
def test_separable_conv2d(self):
val = np.random.random((10, 4, 10, 10))
x = keras.backend.variable(val)
depthwise_kernel_val = np.random.random((3, 3, 4, 1))
pointwise_kernel_val = np.random.random((1, 1, 4, 5))
dk = keras.backend.variable(depthwise_kernel_val)
pk = keras.backend.variable(pointwise_kernel_val)
y = keras.backend.separable_conv2d(
x, dk, pk, padding='valid', data_format='channels_first')
self.assertEqual(y.get_shape().as_list(), [10, 5, 8, 8])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(1, 1), padding='valid', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 8, 8, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(1, 1), padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 5])
val = np.random.random((10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.separable_conv2d(
x, dk, pk, strides=(2, 2), padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5])
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(
x, dk, pk, (2, 2), padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(
x, dk, pk, (2, 2), data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.separable_conv2d(x, dk, pk, (2, 2, 2))
def test_conv3d(self):
val = np.random.random((10, 4, 10, 10, 10))
x = keras.backend.variable(val)
kernel_val = np.random.random((3, 3, 3, 4, 5))
k = keras.backend.variable(kernel_val)
y = keras.backend.conv3d(x, k,
padding='valid', data_format='channels_first')
self.assertEqual(y.get_shape().as_list(), [10, 5, 8, 8, 8])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(1, 1, 1),
padding='valid', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 8, 8, 8, 5])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(1, 1, 1),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 10, 5])
val = np.random.random((10, 10, 10, 10, 4))
x = keras.backend.variable(val)
y = keras.backend.conv3d(x, k, strides=(2, 2, 2),
padding='same', data_format='channels_last')
self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5, 5])
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2, 2),
padding='other', data_format='channels_last')
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2, 2),
data_format='other')
with self.assertRaises(ValueError):
y = keras.backend.conv3d(x, k, (2, 2))
def test_rnn(self):
# implement a simple RNN
num_samples = 4
input_dim = 5
output_dim = 3
timesteps = 6
input_val = np.random.random(
(num_samples, timesteps, input_dim)).astype(np.float32)
init_state_val = np.random.random(
(num_samples, output_dim)).astype(np.float32)
w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)
w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)
np_mask = np.random.randint(2, size=(num_samples, timesteps))
def rnn_step_fn():
w_i = keras.backend.variable(w_i_val)
w_o = keras.backend.variable(w_o_val)
def step_function(x, states):
assert len(states) == 1
prev_output = states[0]
output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o)
return output, [output]
return step_function
# test default setup
last_output_list = [[], [], [], [], [], []]
outputs_list = [[], [], [], [], [], []]
state_list = [[], [], [], [], [], []]
rnn_fn = rnn_step_fn()
inputs = keras.backend.variable(input_val)
initial_states = [keras.backend.variable(init_state_val)]
mask = keras.backend.variable(np_mask)
kwargs_list = [
{'go_backwards': False, 'mask': None},
{'go_backwards': False, 'mask': None, 'unroll': True},
{'go_backwards': True, 'mask': None},
{'go_backwards': True, 'mask': None, 'unroll': True},
{'go_backwards': False, 'mask': mask},
{'go_backwards': False, 'mask': mask, 'unroll': True},
]
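    # The six configurations cover symbolic vs. unrolled loops, forwards and
    # backwards, with and without a mask; the assertions below check that the
    # unrolled and symbolic implementations agree (cases 0 vs. 1 and 2 vs. 3).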
with self.cached_session():
for i, kwargs in enumerate(kwargs_list):
last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
initial_states,
**kwargs)
# check static shape inference
self.assertEquals(last_output.get_shape().as_list(),
[num_samples, output_dim])
self.assertEquals(outputs.get_shape().as_list(),
[num_samples, timesteps, output_dim])
for state in new_states:
self.assertEquals(state.get_shape().as_list(),
[num_samples, output_dim])
last_output_list[i].append(keras.backend.eval(last_output))
outputs_list[i].append(keras.backend.eval(outputs))
self.assertEqual(len(new_states), 1)
state_list[i].append(keras.backend.eval(new_states[0]))
def assert_list_pairwise(z_list, atol=1e-05):
for (z1, z2) in zip(z_list[1:], z_list[:-1]):
self.assertAllClose(z1, z2, atol=atol)
assert_list_pairwise(last_output_list[0], atol=1e-04)
assert_list_pairwise(outputs_list[0], atol=1e-04)
assert_list_pairwise(state_list[0], atol=1e-04)
assert_list_pairwise(last_output_list[2], atol=1e-04)
assert_list_pairwise(outputs_list[2], atol=1e-04)
assert_list_pairwise(state_list[2], atol=1e-04)
for l, u_l in zip(last_output_list[0], last_output_list[1]):
self.assertAllClose(l, u_l, atol=1e-04)
for o, u_o in zip(outputs_list[0], outputs_list[1]):
self.assertAllClose(o, u_o, atol=1e-04)
for s, u_s in zip(state_list[0], state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):
self.assertAllClose(b_l, b_u_l, atol=1e-04)
for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):
self.assertAllClose(b_o, b_u_o, atol=1e-04)
for b_s, b_u_s in zip(state_list[2], state_list[3]):
self.assertAllClose(b_s, b_u_s, atol=1e-04)
def test_rnn_additional_states(self):
# implement a simple RNN
num_samples = 4
input_dim = 5
output_dim = 3
timesteps = 6
input_val = np.random.random(
(num_samples, timesteps, input_dim)).astype(np.float32)
init_state_val = np.random.random(
(num_samples, output_dim)).astype(np.float32)
w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)
w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)
np_mask = np.random.randint(2, size=(num_samples, timesteps))
def rnn_step_fn():
w_i = keras.backend.variable(w_i_val)
w_o = keras.backend.variable(w_o_val)
def step_function(x, states):
assert len(states) == 2
prev_output = states[0]
output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o)
return output, [output,
keras.backend.concatenate([output, output], axis=-1)]
return step_function
# test default setup
last_output_list = [[], [], [], [], [], []]
outputs_list = [[], [], [], [], [], []]
state_list = [[], [], [], [], [], []]
additional_state_list = [[], [], [], [], [], []]
rnn_fn = rnn_step_fn()
inputs = keras.backend.variable(input_val)
initial_states = [
keras.backend.variable(init_state_val),
ops.convert_to_tensor(
np.concatenate([init_state_val, init_state_val], axis=-1))
]
mask = keras.backend.variable(np_mask)
kwargs_list = [
{'go_backwards': False, 'mask': None},
{'go_backwards': False, 'mask': None, 'unroll': True},
{'go_backwards': True, 'mask': None},
{'go_backwards': True, 'mask': None, 'unroll': True},
{'go_backwards': False, 'mask': mask},
{'go_backwards': False, 'mask': mask, 'unroll': True},
]
with self.cached_session():
for i, kwargs in enumerate(kwargs_list):
last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs,
initial_states,
**kwargs)
# check static shape inference
self.assertEqual(last_output.get_shape().as_list(),
[num_samples, output_dim])
self.assertEqual(outputs.get_shape().as_list(),
[num_samples, timesteps, output_dim])
# for state in new_states:
# self.assertEquals(state.get_shape().as_list(),
# [num_samples, output_dim])
self.assertEqual(new_states[0].get_shape().as_list(),
[num_samples, output_dim])
self.assertEqual(new_states[1].get_shape().as_list(),
[num_samples, 2 * output_dim])
last_output_list[i].append(keras.backend.eval(last_output))
outputs_list[i].append(keras.backend.eval(outputs))
self.assertEqual(len(new_states), 2)
state_list[i].append(keras.backend.eval(new_states[0]))
additional_state_list[i].append(keras.backend.eval(new_states[1]))
def assert_list_pairwise(z_list, atol=1e-05):
for (z1, z2) in zip(z_list[1:], z_list[:-1]):
self.assertAllClose(z1, z2, atol=atol)
assert_list_pairwise(last_output_list[0], atol=1e-04)
assert_list_pairwise(outputs_list[0], atol=1e-04)
assert_list_pairwise(state_list[0], atol=1e-04)
assert_list_pairwise(additional_state_list[0], atol=1e-04)
assert_list_pairwise(last_output_list[2], atol=1e-04)
assert_list_pairwise(outputs_list[2], atol=1e-04)
assert_list_pairwise(state_list[2], atol=1e-04)
assert_list_pairwise(additional_state_list[2], atol=1e-04)
for l, u_l in zip(last_output_list[0], last_output_list[1]):
self.assertAllClose(l, u_l, atol=1e-04)
for o, u_o in zip(outputs_list[0], outputs_list[1]):
self.assertAllClose(o, u_o, atol=1e-04)
for s, u_s in zip(state_list[0], state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for s, u_s in zip(additional_state_list[0], additional_state_list[1]):
self.assertAllClose(s, u_s, atol=1e-04)
for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):
self.assertAllClose(b_l, b_u_l, atol=1e-04)
for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):
self.assertAllClose(b_o, b_u_o, atol=1e-04)
for b_s, b_u_s in zip(state_list[2], state_list[3]):
self.assertAllClose(b_s, b_u_s, atol=1e-04)
for s, u_s in zip(additional_state_list[2], additional_state_list[3]):
self.assertAllClose(s, u_s, atol=1e-04)
def test_normalize_batch_in_training(self):
val = np.random.random((10, 3, 10, 10))
x = keras.backend.variable(val)
reduction_axes = (0, 2, 3)
g_val = np.random.random((3,))
    b_val = np.random.random((3,))
"""Mapping functions that get values on a prescribed Cartesian coordinates grids from GTS output data files which are in flux coordinates.
"""
import Map_Mod_C as mmc
import numpy as np
from sdp.geometry import grid as sdp_grid
import scipy.io.netcdf as nc
from scipy.interpolate import NearestNDInterpolator
from time import clock
class GTS_loader_Error(Exception):
"""Exception class for handling GTS loading errors
"""
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
class GTS_Loader:
"""GTS Loading class
For each GTS run case, setup all the loading parameters, read out necessary data, and output to suited format.
"""
def __init__(self, grid, t0,dt,nt, fluc_file_path,eq_fname,prof_fname,gts_file_path, n_cross_section = 1, phi_fname_head = 'PHI.', den_fname_head = 'DEN.', n_boundary = 1001, amplification = 1):
"""Initialize Loading Parameters:
grid: sdp.geometry.Grid.Cartesian2D or Cartesian3D object, contains the output grid information.
t0: int; Starting time of the sampling series, in simulation record step counts.
dt: int; The interval between two sample points, in unit of simulation record step counts.
nt: int; The total number of time_steps.
n_cross_section: int; total cross-sections used for enlarging the ensemble
n_boundary: int; The total number of grid points resolving the plasma last closed flux surface. Normally not important.
fluc_file_path: string; directory where to store the output fluctuation files
        eq_fname: string; filename of the equilibrium file, either absolute or relative path.
phi_fname_head: string; The header letters of the phi record file before the toroidal plane number, usually "PHI."
den_fname_head: string; The header letters of the density record file before the toroidal plane number, usually "DEN."
gts_file_path: string; the directory where the PHI data files are stored.
"""
self.grid = grid
        if isinstance(grid, sdp_grid.Cartesian2D):
self.dimension = 2
self.xmin,self.xmax,self.nx = grid.Rmin,grid.Rmax,grid.NR
self.ymin,self.ymax,self.ny = grid.Zmin,grid.Zmax,grid.NZ
self.zmin,self.zmax,self.nz = 0,0,1
        elif isinstance(grid, sdp_grid.Cartesian3D):
self.dimension = 3
self.xmin,self.xmax,self.nx = grid.Xmin,grid.Xmax,grid.NX
self.ymin,self.ymax,self.ny = grid.Ymin,grid.Ymax,grid.NY
self.zmin,self.zmax,self.nz = grid.Zmin,grid.Zmax,grid.NZ
else:
raise GTS_loader_Error('grid not valid. Right now GTS loader only support Cartesian2D or Cartesian3D grid.')
self.t0,self.dt,self.nt = t0,dt,nt
self.time_steps = self.t0 + np.arange(self.nt) *self.dt
self.n_cross_section = n_cross_section
self.fluc_file_path = fluc_file_path
self.eq_fname = eq_fname
self.prof_fname = prof_fname
self.phi_fname_head = phi_fname_head
self.den_fname_head = den_fname_head
self.gts_file_path = gts_file_path
self.n_boundary = n_boundary
        self.amplification = amplification
mmc.set_para_(Xmin=self.xmin,Xmax=self.xmax,NX=self.nx,
Ymin=self.ymin,Ymax=self.ymax,NY=self.ny,
Zmin=self.zmin,Zmax=self.zmax,NZ=self.nz,
NBOUNDARY=self.n_boundary,
TStart=self.t0,TStep=self.dt,NT=self.nt,
Fluc_Amplification=self.amplification,
FlucFilePath=self.fluc_file_path,
EqFileName=self.eq_fname,
NTFileName=self.prof_fname,
PHIFileNameStart=self.phi_fname_head,
DENFileNameStart = self.den_fname_head,
GTSDataDir=self.gts_file_path)
mmc.show_para_()
self.get_fluctuations_from_GTS()
if (self.dimension == 3):
self.dne_on_grid = self.ne0_on_grid[np.newaxis,np.newaxis,:,:,:] * (self.dne_ad_on_grid + self.nane_on_grid)
self.B_2d = self.Btol_2d
elif (self.dimension == 2):
self.ne_on_grid = self.ne0_on_grid * (1 + self.dne_ad_on_grid + self.nane_on_grid)
self.B_on_grid = self.Bt_on_grid
def show_para(self):
mmc.show_para_()
def get_fluctuations_from_GTS(self):
"""load fluctuations on grid using C_function
Create variables:
equilibrium quantities:
ne0_on_grid: double ndarray (1,ny,nx), equilibrium electron density.
Te0_on_grid: double ndarray (1,ny,nx), equilibrium electron temperature.
Bt_on_grid,Bp_on_grid,BR_on_grid,BZ_on_grid: double ndarray (1,ny,nx), equilibrium toroidal and poloidal magnetic field, and R,Z component of Bpol.
fluctuations:
dne_ad_on_grid: double ndarray (nt,nz,ny,nx), adiabatic electron density, calculated from fluctuating potential phi: dne_ad_on_grid/ne0_on_grid = e*phi/Te0_on_grid
nane_on_grid : double ndarray (nt,nz,ny,nx), non-adiabatic electron density normalized to local equilibrium density, read from file.
nate_on_grid : double ndarray (nt,nz,ny,nx), non-adiabatic electron temperature normalized to equilibrium temperature at a reference radius, read from file.
"""
t0 = clock()
if(self.dimension == 3):
x1d = self.grid.X1D
y1d = self.grid.Y1D
self.x2d = np.zeros((1,self.ny,self.nx))+ x1d[np.newaxis,np.newaxis,:]
self.y2d = np.zeros((1,self.ny,self.nx))+ y1d[np.newaxis,:,np.newaxis]
z2d = np.zeros((1,self.ny,self.nx))
x3d = self.grid.X3D
y3d = self.grid.Y3D
z3d = self.grid.Z3D
self.dne_ad_on_grid = np.zeros((self.n_cross_section,self.nt,self.nz,self.ny,self.nx))
self.nane_on_grid = np.zeros((self.n_cross_section,self.nt,self.nz,self.ny,self.nx))
self.nate_on_grid = np.zeros_like(self.nane_on_grid)
#Note that new equilibrium loading convention needs only 2D equilibrium data.
self.ne0_2d = np.zeros((1,self.ny,self.nx))
self.Te0_2d = np.zeros((1,self.ny,self.nx))
self.Btol_2d = np.zeros((1,self.ny,self.nx))
self.Bp_2d = np.zeros((1,self.ny,self.nx))
self.BR_2d = np.zeros((1,self.ny,self.nx))
self.BZ_2d = np.zeros((1,self.ny,self.nx))
mismatched_eq = np.zeros_like(self.x2d,dtype = 'int32')
fluc_2d = np.zeros((self.nt,1,self.ny,self.nx))
mmc.set_para_(Xmin=self.xmin,Xmax=self.xmax,NX=self.nx,
Ymin=self.ymin,Ymax=self.ymax,NY=self.ny,
Zmin=0,Zmax=0,NZ=1,
NBOUNDARY=self.n_boundary,
TStart=1,TStep=1,NT=1,
Fluc_Amplification=self.amplification,
FlucFilePath=self.fluc_file_path,
EqFileName=self.eq_fname,
NTFileName=self.prof_fname,
PHIFileNameStart=self.phi_fname_head,
DENFileNameStart = self.den_fname_head,
GTSDataDir=self.gts_file_path)
            # one separate 2D run to get all the equilibrium quantities
mmc.get_GTS_profiles_(self.x2d,self.y2d,z2d,self.ne0_2d,self.Te0_2d,self.Btol_2d,self.Bp_2d, self.BR_2d, self.BZ_2d, fluc_2d,fluc_2d,fluc_2d,mismatched_eq,0)
self._fill_mismatched_eq(mismatched_eq)
#calculate B_toroidal based on B_total and B_poloidal
self.BPhi_2d = np.sqrt(self.Btol_2d**2 - self.Bp_2d**2)
mmc.set_para_(Xmin=self.xmin,Xmax=self.xmax,NX=self.nx,
Ymin=self.ymin,Ymax=self.ymax,NY=self.ny,
Zmin=self.zmin,Zmax=self.zmax,NZ=self.nz,
NBOUNDARY=self.n_boundary,
TStart=self.t0,TStep=self.dt,NT=self.nt,
Fluc_Amplification=self.amplification,
FlucFilePath=self.fluc_file_path,
EqFileName=self.eq_fname,
NTFileName=self.prof_fname,
PHIFileNameStart=self.phi_fname_head,
DENFileNameStart = self.den_fname_head,
GTSDataDir=self.gts_file_path)
#temporary arrays to hold 3D equilibrium quantities.
self.ne0_on_grid = np.zeros_like(x3d)
self.Te0_on_grid = np.zeros_like(x3d)
self.Btol_on_grid = np.zeros_like(x3d)
self.Bp_on_grid = np.zeros_like(x3d)
self.BR_on_grid = np.zeros_like(x3d)
self.BZ_on_grid = np.zeros_like(x3d)
self.mismatch = np.zeros_like(x3d,dtype = 'int32')
self.total_cross_section = mmc.get_GTS_profiles_(x3d,y3d,z3d,self.ne0_on_grid,self.Te0_on_grid,self.Btol_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[0,...],self.nane_on_grid[0,...],self.nate_on_grid[0,...],self.mismatch, 0)
dcross = int(np.floor(self.total_cross_section / self.n_cross_section))
self.center_cross_sections = np.arange(self.n_cross_section) * dcross
for i in range(1,len(self.center_cross_sections)):
mmc.get_GTS_profiles_(x3d,y3d,z3d,self.ne0_on_grid,self.Te0_on_grid,self.Btol_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[i,...],self.nane_on_grid[i,...],self.nate_on_grid[i,...],self.mismatch,self.center_cross_sections[i])
self._fill_mismatched(self.mismatch)
elif(self.dimension == 2):
x1d = self.grid.R1D
y1d = self.grid.Z1D
x2d = np.zeros((1,self.ny,self.nx))+ x1d[np.newaxis,np.newaxis,:]
y2d = np.zeros((1,self.ny,self.nx))+ y1d[np.newaxis,:,np.newaxis]
z2d = np.zeros((1,self.ny,self.nx))
self.dne_ad_on_grid = np.zeros((self.n_cross_section,self.nt,1,self.ny,self.nx))
self.nane_on_grid = np.zeros((self.n_cross_section,self.nt,1,self.ny,self.nx))
self.nate_on_grid = np.zeros_like(self.nane_on_grid)
#Note that new equilibrium loading convention needs only 2D equilibrium data.
self.ne0_on_grid = np.zeros((1,self.ny,self.nx))
self.Te0_on_grid = np.zeros((1,self.ny,self.nx))
self.Bt_on_grid = np.zeros((1,self.ny,self.nx))
self.Bp_on_grid = np.zeros((1,self.ny,self.nx))
self.BR_on_grid = np.zeros((1,self.ny,self.nx))
self.BZ_on_grid = np.zeros((1,self.ny,self.nx))
self.mismatch = np.zeros_like(self.ne0_on_grid,dtype = 'int32')
self.total_cross_section = mmc.get_GTS_profiles_(x2d,y2d,z2d,self.ne0_on_grid,self.Te0_on_grid,self.Bt_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[0,...],self.nane_on_grid[0,...],self.nate_on_grid[0,...],self.mismatch, 0)
dcross = int(np.floor(self.total_cross_section / self.n_cross_section))
self.center_cross_sections = np.arange(self.n_cross_section) * dcross
for i in range(1,len(self.center_cross_sections)):
mmc.get_GTS_profiles_(x2d,y2d,z2d,self.ne0_on_grid,self.Te0_on_grid,self.Bt_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[i,...],self.nane_on_grid[i,...],self.nate_on_grid[i,...],self.mismatch,self.center_cross_sections[i])
t1 = clock()
self._fill_mismatched(self.mismatch)
t2 = clock()
print('Time used for interpolating mismatched points: {0}\nTotal time used:{1}'.format(t2-t1,t2-t0))
def _fill_mismatched(self,mismatch):
"""interpolate upon correctly matched values, to get values on mismatched points
"""
print('Start correcting mismatched points.')
correct_idx = (mismatch == 0)
mismatch_idx = (mismatch == 1)
if self.dimension == 3:
x_correct = self.grid.X3D[correct_idx]
y_correct = self.grid.Y3D[correct_idx]
z_correct = self.grid.Z3D[correct_idx]
xwant = self.grid.X3D[mismatch_idx]
ywant = self.grid.Y3D[mismatch_idx]
zwant = self.grid.Z3D[mismatch_idx]
points = np.array([z_correct,y_correct,x_correct]).T
points_want = np.array([zwant,ywant,xwant]).T
self.ne0_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.ne0_on_grid[correct_idx])(points_want)
self.Te0_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.Te0_on_grid[correct_idx])(points_want)
self.Btol_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.Btol_on_grid[correct_idx])(points_want)
self.Bp_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.Bp_on_grid[correct_idx])(points_want)
self.BR_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.BR_on_grid[correct_idx])(points_want)
self.BZ_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.BZ_on_grid[correct_idx])(points_want)
for i in range(self.n_cross_section):
for j in range(self.nt):
self.dne_ad_on_grid[i,j][mismatch_idx] = NearestNDInterpolator(points,self.dne_ad_on_grid[i,j][correct_idx])(points_want)
self.nane_on_grid[i,j][mismatch_idx] = NearestNDInterpolator(points,self.nane_on_grid[i,j][correct_idx])(points_want)
self.nate_on_grid[i,j][mismatch_idx] = NearestNDInterpolator(points,self.nate_on_grid[i,j][correct_idx])(points_want)
print('Cross-section {0} finished.'.format(i))
else:
r_correct = self.grid.R2D[correct_idx[0,:,:]]
z_correct = self.grid.Z2D[correct_idx[0,:,:]]
rwant = self.grid.R2D[mismatch_idx[0,:,:]]
zwant = self.grid.Z2D[mismatch_idx[0,:,:]]
            points = np.array([z_correct,r_correct])
"""
tests.test_utilities
unit test for psola.utilities module
Author: jreinhold
Created on: Aug 10, 2017
"""
import unittest
import numpy as np
from psola.errors import PsolaError
from psola.utilities.low_pass_filter import lpf
from psola.utilities.center_clipping import center_clipping
from psola.utilities.find import find
class TestLowPassFilter(unittest.TestCase):
def setUp(self):
self.fs = 2000
        t = np.linspace(0, 1.0, 2**11)
"""
Map CEMS CC generators to EIA CC units
"""
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def method_1(boilers, eia_plants):
"""
Method 1 to map boilers to eia plants
"""
# Create boiler-specific unit (Method 1)
    no_eia_plant = boilers.loc[~np.in1d(boilers["Plant Code"], eia_plants)
import numpy as np
import sys
import os
os.stat('pylib')
sys.path.append("pylib")
sys.path.append("pylib/utils")
import argparse
import tables
from pulp.utils.parallel import pprint
from pulp.utils.barstest import generate_bars_dict
from sklearn.neural_network.multilayer_perceptron import MLPClassifier
#%% -- General params --
seed = 0
#%% -- BSC parameters --
# datatype... possible values: 'MNIST', 'BARSTEST' or 'BARS'
datatype = 'BARS'
neg_bars = False
# function of the model
mod_fun = 'BSC_ET'
# max iterations
Iters = 50
# Scale of the bars
SCALE = 10
# normalized gen. sigma from gaussian
sigma = .35
# pi number of causes
pi = .2
# Number of datapoints to generate
N = int(2000)
Nc_lo = (0.0,0.0)
Nc_hi = (.9,1.0)
# Dimensionality of the model
size = 5 # D = size*size
H = 2*size # number of latents
D = size**2 # dimensionality of observed data
force_H = 0 # if H is fixed given by input or default
# Approximation parameters for Expectation Truncation
Hprime = 5
gamma = 3
# annealing params
Tmax = 1.2
T_iter = .8
priT = True
#%% -- memory heavy options --
# if (try) doing an impainting test
impaint = 0
# if doing a prediction of the binary latents (s) from the MLP
predict = 0
#%% -- (ML)P-params --
# enabling co-training
K_mlp = False
# data set size
Ntr = 10000
Nte = 7500
Nva = 4500
# after training max consecutive iterations
mlp_max_iter = 50
# generative max iteration fraction
mlp_gen_iter = 1
# generative max/min training data fraction
mlp_gen_data = 1
mlp_min_data = 1/Ntr
# if the validation fraction in training shall be noiseless
noiseless_valid = False
# no. batches for the gen. mlp data
batches = 1
mlp_lrate = .001
mlp_lmomm = 'constant'
early_stopping = True
warm_start = True
nesterovs_momentum = True
# hidden layer sizes
n_hidden=np.array([]) # here no hidden layers
validation_fraction=.1
batch_size=200
eta = .001
# experimental ...
# if modifying sklearn/neural_network/MLP.py
# and adding max option
# for maximum combination instead of linear
mlp_maxpool = False
gen_datas = False
gen_train = False
mlp_ratio = .0
#%% top-layer variables
top_train = .5 #when does the training start
top_iters = (.8,50) #when does how many iterations happen per step
top_mulIt = 0 #should there even be more top-layer iterations each step
#%% -- input parsing 2.0 xD --
n = 2
while n < len(sys.argv):
n2 = 1
try:
a = float(sys.argv[n+1])
try:
exec('%s = %f' % (sys.argv[n],a))
pprint('%s = %f' % (sys.argv[n],a))
except:
pprint('Error: Bad argument name!')
except:
if sys.argv[n] == 'outpath' or sys.argv[n] == 'h5path' or sys.argv[n] == 'h5_path':
try:
exec('h5path = "%s"' % (sys.argv[n+1].strip()))
pprint('h5-path = "%s"' % sys.argv[n+1].strip())
except:
pprint('Error: Bad h5-path name!')
n2 = 1
elif sys.argv[n] == 'mlppath' or sys.argv[n] == 'h5_mlp':
try:
exec('mlppath = "%s"' % (sys.argv[n+1].strip()))
pprint('mlp-path = "%s"' % sys.argv[n+1].strip())
except:
pprint('Error: Bad mlp path string: %s' %sys.argv[n])
n2 = 1
elif sys.argv[n] == 'batch_size' or sys.argv[n] == 'mlp_lmomm' \
or sys.argv[n] == 'txt_nm' or sys.argv[n] == 'h5f_nm' \
or sys.argv[n] == 'actfun' or sys.argv[n] == 'mod_fun'\
or sys.argv[n] == 'txt_scr':
try:
exec('%s = "%s"' % (sys.argv[n], sys.argv[n+1].strip()))
pprint('%s: "%s"' % (sys.argv[n], sys.argv[n+1].strip()))
exec('print(type(%s))' %sys.argv[n])
except:
pprint('Error: Bad strg arg: %s' %sys.argv[n])
n2 = 1
        elif sys.argv[n] in ('top_iters', 'Nc_lo', 'Nc_hi'):
try:
exec('%s = %s' % (sys.argv[n], sys.argv[n+1].strip()))
pprint('%s: %s' % (sys.argv[n], sys.argv[n+1].strip()))
exec('print(type(%s))' %sys.argv[n])
except:
pprint('Error: Bad strg arg: %s' %sys.argv[n])
n2 = 1
else:
try:
exec('%s = %s' % ( sys.argv[n], sys.argv[n+1].strip() ))
pprint('%s = %s' %( sys.argv[n], sys.argv[n+1].strip() ))
n2 = 1
except:
try:
exec('%s = 1' % sys.argv[n].strip())
pprint('%s: 1' % sys.argv[n].strip())
except:
pprint('Error: Bad argument name!')
n2 = 0
n += 1 + n2
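# The loop above turns the command line into free-form 'name value' overrides of
# the module-level defaults, reading pairs from sys.argv[2:] (the first argument
# is not parsed here). A hypothetical invocation (script name and values are
# illustrative only):
#
#   python bsc_et_run.py run1 H 10 pi 0.2 Iters 100 outpath results/run1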
# -- integers --
#size = int(size)
D = int(D) # things that cannot be changed
size = int(size)
H = int(H)
Hprime = int(Hprime)
gamma = int(gamma)
N = int(N)
Ntr = int(Ntr)
Nte = int(Nte)
Nva = int(Nva)
Iters = int(Iters)
# -- flags --
mlp_gen_iter = int(mlp_gen_iter) #or 100*int(mlp_gen_iter<1)
mlp_gen_data = int(mlp_gen_data) # no. data points else Ntr or N_bsc
# -- params --
if 'piH' in locals():
pi = piH/H
# fit truncation params to barstest
if 'truncfit' in locals():
if truncfit:
if not force_H:
H = int(np.sqrt(D)) * 2
gamma = int(pi*H+1)
Hprime = gamma + 2
# -- n_hidden --
n_hidden = np.round(np.array(n_hidden))
# -*- coding: utf-8 -*-
"""
==============================
1D Wasserstein barycenter demo
==============================
@author: rflamary
"""
import numpy as np
import matplotlib.pylab as pl
import ot
from mpl_toolkits.mplot3d import Axes3D #necessary for 3d plot even if not used
import scipy as sp
import scipy.ndimage  # make sp.ndimage available for conv2 below
import scipy.signal as sps
#%% parameters
n=10 # nb bins
# bin positions
x=np.arange(n,dtype=np.float64)
xx,yy=np.meshgrid(x,x)
xpos=np.hstack((xx.reshape(-1,1),yy.reshape(-1,1)))
M=ot.dist(xpos)
I0=((xx-5)**2+(yy-5)**2<3**2)*1.0
I1=((xx-7)**2+(yy-7)**2<3**2)*1.0
I0/=I0.sum()
I1/=I1.sum()
i0=I0.ravel()
i1=I1.ravel()
M=M[i0>0,:][:,i1>0].copy()
i0=i0[i0>0]
i1=i1[i1>0]
Itot=np.concatenate((I0[:,:,np.newaxis],I1[:,:,np.newaxis]),2)
#%% plot the distributions
pl.figure(1)
pl.subplot(2,2,1)
pl.imshow(I0)
pl.subplot(2,2,2)
pl.imshow(I1)
#%% barycenter computation
alpha=0.5 # 0<=alpha<=1
weights=np.array([1-alpha,alpha])
def conv2(I,k):
return sp.ndimage.convolve1d(sp.ndimage.convolve1d(I,k,axis=1),k,axis=0)
def conv2n(I,k):
res=np.zeros_like(I)
for i in range(I.shape[2]):
res[:,:,i]=conv2(I[:,:,i],k)
return res
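# conv2 applies the same 1-D kernel along both axes, which for a separable
# kernel (such as the Gaussian built below) equals a full 2-D convolution with
# np.outer(k, k); conv2n simply repeats this over the last axis of a stack.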
def get_1Dkernel(reg,thr=1e-16,wmax=1024):
w=max(min(wmax,2*int((-np.log(thr)*reg)**(.5))),3)
    x=np.arange(w,dtype=np.float64)
# MIT license
# <EMAIL>
'''rigid-body kinematics'''
from __future__ import absolute_import
import numpy as np
import math
import sys
from contextlib import contextmanager
from numpy.linalg import norm
# a few helpers
def vec(*coords): return np.array(coords, dtype=float)
deg = math.pi / 180.0
ex = vec(1, 0, 0)
ey = vec(0, 1, 0)
ez = vec(0, 0, 1)
# slices for quaternion/rigid/deriv
imag_slice = slice(1, None)
real_index = 0
angular_slice = slice(None, 3)
linear_slice = slice(3, None)
orient_slice = slice(None, 4)
center_slice = slice(4, None)
def norm2(x): return x.dot(x)
class Rigid3(np.ndarray):
'''SE(3) group'''
dim = 6
__slots__ = ()
class Deriv(np.ndarray):
'''SE(3) lie algebra as (translation, rotation)'''
__slots__ = ()
def __new__(cls, *args, **kwargs):
return np.ndarray.__new__(cls, 6)
def __init__(self, value=None):
if value is None:
self[:] = 0
else:
self[:] = value
@property
def linear(self):
return self[linear_slice].view(np.ndarray)
@linear.setter
def linear(self, value):
self[linear_slice] = value
@property
def angular(self):
return self[angular_slice].view(np.ndarray)
@angular.setter
def angular(self, value):
self[angular_slice] = value
@staticmethod
def exp(x):
'''group exponential'''
res = Rigid3()
res.orient = Quaternion.exp(x.angular)
res.center = res.orient(Quaternion.dexp(x.angular).dot(x.linear))
return res
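    # In formulas: for xi = (omega, v) the exponential above computes
    #   orient = exp_SO3(omega)
    #   center = R(omega) @ dexp(omega) @ v
    # i.e. the rotation is the SO(3) exponential and the translation is the
    # linear part pushed through the (body-fixed) derivative of that
    # exponential, matching the closed-form SE(3) exponential.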
@property
def center(self):
return self[center_slice].view(np.ndarray)
@center.setter
def center(self, value):
self[center_slice] = value
@property
def orient(self):
return self[orient_slice].view(Quaternion)
@orient.setter
def orient(self, value):
self[orient_slice] = value
def __new__(cls, *args, **kwargs):
return np.ndarray.__new__(cls, 7)
def __init__(self, value=None, **kwargs):
'''construct a rigid transform from given value, identity if none'''
if value is None:
self[:] = 0
self.orient.real = 1
else:
self[:] = value
for k, v in kwargs.items():
setattr(self, k, v)
def inv(self):
'''SE(3) inverse'''
res = Rigid3()
res.orient = self.orient.inv()
res.center = -res.orient(self.center)
return res
def __mul__(self, other):
'''SE(3) product'''
res = Rigid3()
res.orient = self.orient * other.orient
res.center = self.center + self.orient(other.center)
return res
def __call__(self, x):
'''applies rigid transform to vector x'''
return self.center + self.orient(x)
def Ad(self):
'''SE(3) group adjoint matrix in lie algebra coordinates'''
res = np.zeros((6, 6))
R = self.orient.matrix()
t = Quaternion.hat(self.center)
res[angular_slice, angular_slice] = R
res[linear_slice, linear_slice] = R
res[linear_slice, angular_slice] = t.dot(R)
return res
def matrix(self):
'''homogeneous matrix for rigid transformation'''
res = np.zeros((4, 4))
res[:3, :3] = self.orient.matrix()
res[:3, 3] = self.center
res[3, 3] = 1
return res
def log(self):
'''SE(3) logarithm'''
res = Rigid3.Deriv()
res.angular = self.orient.log()
res.linear = self.orient.dlog().dot(self.orient.conj()(self.center))
return res
@staticmethod
def rotation(q):
res = Rigid3()
res.orient = q
return res
@staticmethod
def translation(v):
res = Rigid3()
res.center = v
return res
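# Illustrative sketch (values chosen here, not from the original source):
# compose a translation with a quarter turn about z and apply it to a point.
#
#   g = Rigid3.translation(vec(1, 0, 0)) * Rigid3.rotation(Quaternion.exp(90 * deg * ez))
#   p = g(vec(1, 0, 0))            # rotation first, then translation: ~ (1, 1, 0)
#   assert np.allclose(g.inv()(p), vec(1, 0, 0))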
class Quaternion(np.ndarray):
__slots__ = ()
dim = 3
epsilon = sys.float_info.epsilon
def __new__(cls, *args):
return np.ndarray.__new__(cls, 4)
def __init__(self, value=None):
'''construct a quaternion with given values, identity by default'''
if value is None:
self.real = 1
self.imag = 0
else:
self[:] = value
@staticmethod
def half_turn(axis):
'''construct a quaternion for a half-turn of given axis.
:param axis: must be normalized
'''
res = Quaternion()
res.real = 0
res.imag = axis
return res
def inv(self):
'''quaternion inverse. use conj for unit quaternions instead'''
return self.conj() / self.dot(self)
def conj(self):
'''quaternion conjugate'''
res = Quaternion()
res.real = self.real
res.imag = -self.imag
return res
@property
def real(self):
'''quaternion real part'''
return self[real_index]
@real.setter
def real(self, value): self[real_index] = value
@property
def imag(self):
'''quaternion imaginary part'''
return self[imag_slice].view(np.ndarray)
@imag.setter
def imag(self, value): self[imag_slice] = value
@property
def coeffs(self): return self.view(np.ndarray)
@coeffs.setter
def coeffs(self, value):
self.coeffs[:] = value
def normalize(self):
'''normalize quaternion'''
result = norm(self)
self /= result
return result
def flip(self):
'''flip quaternion in the real positive halfplane, if needed'''
if self.real < 0:
self = -self
def __mul__(self, other):
'''quaternion product'''
res = Quaternion()
# TODO there might be a more efficient way
res.real = self.real * other.real - self.imag.dot(other.imag)
res.imag = self.real * other.imag + other.real * self.imag + np.cross(self.imag, other.imag)
return res
def __call__(self, x):
'''rotate a vector. self should be normalized'''
# euler-rodrigues formula
x = np.array(x)
cross = np.cross(self.imag, 2 * x)
return x + self.real * cross + np.cross(self.imag, cross)
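    # This is the Euler-Rodrigues form: for a unit quaternion q = (w, v),
    #   R(q) x = x + 2 w (v x x) + 2 v x (v x x)
    # which rotates x without building the 3x3 rotation matrix.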
def matrix(self):
'''rotation matrix conversion'''
K = Quaternion.hat(self.imag)
return np.identity(3) + (2.0 * self.real) * K + 2.0 * K.dot(K)
@staticmethod
def from_matrix(R):
# extract skew-symmetric part
v = Quaternion.hat_inv((R - R.T) / 2.0)
n = norm(v)
if n < Quaternion.epsilon:
# R is either I or 2 vv^T - I
x = np.random.rand(3)
v = (R.dot(x) - x) / 2.0
n = norm(v)
if n < Quaternion.epsilon:
return Quaternion()
else:
return Quaternion.half_turn((x + v) / norm(x + v))
# note: we can't simply get theta from asin(n) since we'd miss half the
# values
tmp = (np.trace(R) - 1.0) / 2.0
tmp = max(-1.0, min(1.0, tmp)) # paranoid clamping
theta = math.acos(tmp)
res = Quaternion()
res.real = math.cos(theta / 2)
res.imag = math.sin(theta / 2) * (v / n)
return res
@staticmethod
def from_euler(xyz, order='zyx', degrees=False):
'''q = qz qy qx, xyz in radians'''
if degrees:
xyz = np.array(xyz) / deg
qs = [Quaternion.exp(xyz * ex),
Quaternion.exp(xyz * ey),
Quaternion.exp(xyz * ez)]
q = Quaternion()
for a in order:
index = ord(a) - ord('x')
q = q * qs[index]
return q
@staticmethod
def exp(x):
'''quaternion exponential, halved to match SO(3)'''
x = np.array(x)
theta = np.linalg.norm(x)
res = Quaternion()
if math.fabs(theta) < Quaternion.epsilon:
# fallback to gnomonic projection: (1 + x) / || 1 + x ||
res.imag = x / 2.0
res.normalize()
return res
half_theta = theta / 2.0
s = math.sin(half_theta)
c = math.cos(half_theta)
res.real = c
res.imag = x * (s / theta)
return res
@staticmethod
def dexp(x):
'''exponential derivative SO(3) in body-fixed coordinates'''
theta = norm(x)
if theta < Quaternion.epsilon:
return np.identity(3)
n = x / theta
P = np.outer(n, n)
H = Quaternion.hat(n)
# we want SO(3) exponential
theta = theta / 2.0
s = math.sin(theta)
c = math.cos(theta)
I = np.identity(3)
return P + (s / theta) * (c * I - s * H).dot(I - P)
def dlog(self):
'''logarithm derivative SO(3) in body-fixed coordinates'''
n, theta = self.axis_angle()
if n is None:
return np.identity(3)
half_theta = theta / 2
res = np.zeros((3, 3))
P = np.outer(n, n)
log = n * theta
return (P + (half_theta / math.tan(half_theta)) * (np.identity(3) - P) + Quaternion.hat(log / 2))
def log(self):
'''quaternion logarithm, doubled to match SO(3)'''
axis, angle = self.axis_angle()
if axis is None:
return np.zeros(3)
return angle * axis
def axis_angle(self):
'''rotation axis/angle'''
q = self if self.real >= 0 else -self
assert q.real >= 0
half_angle = math.acos(min(q.real, 1.0))
if half_angle > Quaternion.epsilon:
return q.imag / math.sin(half_angle), 2.0 * half_angle
n = norm(q.imag)
if n > Quaternion.epsilon:
sign = 1.0 if half_angle > 0 else -1.0
return q.imag * (sign / n), 2 * half_angle
return None, 2 * half_angle
def angle(self):
'''rotation angle'''
return self.axis_angle()[1]
def axis(self):
'''rotation axis'''
return self.axis_angle()[0]
@staticmethod
def from_vectors(x, y):
'''rotation sending x to y'''
# compute -yx
yx = Quaternion()
dot = x.dot(y)
yx.real = y.dot(x)
        yx.imag = np.cross(x, y)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import numpy as np
import unittest
CARDINAL_DIRECTIONS = ("N", "E", "S", "W")
FNAME = "input.txt"
TEST_FNAME = "test_input.txt"
def main():
"""Main function."""
data = load_input(FNAME)
part1(data)
part2(data)
print("\nUnittests")
unittest.main()
def part1(data):
"""Solution to day 12, part 1."""
ship = Ship()
for instruction in data:
ship.action(instruction)
distance = ship.manhattan_distance()
print(f"The Manhattan distance is {distance}.")
return distance
def part2(data):
"""Solution to day 12, part 2."""
ship = ShipWithWaypoint()
for instruction in data:
ship.action(instruction)
distance = ship.manhattan_distance()
print(f"The Manhattan distance is {distance}.")
return distance
def load_input(fname):
"""Read in the data, return as a list."""
with open(fname, "r") as f:
data = f.readlines()
return data
class FloatingObject(metaclass=ABCMeta):
"""An object in a two-dimensional space.
The following convention for position is used: north (N) and east (E) are
assigned to positive values, south (S) and west (W) to negative values.
Following typical conventions, we formulate the coordinates as a (latitude,
longitude) pair.
Example
-------
Initial position: (0, 0)
Moving N10 results in a new position (10, 0).
Following this, moving S20 results in a new position (-10, 0).
"""
def __init__(self, initial_position):
"""Create an object with an initial position."""
        # Copy the input twice (np.array always copies, unlike np.asarray) so
        # the initial and current positions stay independent when updated.
        self.initial_position = np.array(initial_position)
        self.current_position = np.array(initial_position)
def update_position(self, direction, value):
"""Update the object's position."""
if direction == "N":
self.current_position[0] += value
return
if direction == "S":
self.current_position[0] -= value
return
if direction == "E":
self.current_position[1] += value
return
if direction == "W":
self.current_position[1] -= value
return
msg = "No valid direction indicated!"
raise ValueError(msg)
class Ship(FloatingObject):
"""A ship moving in a two-dimensional space."""
def __init__(self, initial_direction="E", initial_position=[0,0]):
super().__init__(initial_position)
self.current_direction_idx = CARDINAL_DIRECTIONS.index(initial_direction)
def action(self, instruction):
"""Perform an action, which can either be a movement or a rotation."""
direction = instruction[0]
value = int(instruction[1:])
if direction in ("L", "R"):
self.update_direction(direction, value)
else:
self.update_position(direction, value)
def update_position(self, direction, value):
"""Update the position of the ship."""
if direction == "F":
direction = CARDINAL_DIRECTIONS[self.current_direction_idx]
super().update_position(direction, value)
def update_direction(self, direction, degrees):
"""Update the direction by rotating X degrees to the left or right.
Note that currently, 'degrees' must be a multiple of 90.
"""
if abs(degrees) % 90 != 0:
msg = f"'degrees' is not a multiple of 90. degrees is: {degrees}"
raise ValueError(msg)
if direction not in ("L", "R"):
msg = "'direction' must be 'L' or 'R'."
raise ValueError(msg)
degrees = -1 * degrees if direction == "L" else degrees
self.current_direction_idx += degrees // 90
self.current_direction_idx %= len(CARDINAL_DIRECTIONS)
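        # Worked example: facing "E" (index 1), "R90" adds 90 // 90 = 1 giving
        # index 2 ("S"), while "L90" subtracts 1 giving index 0 ("N"); the
        # modulo wraps the index around after full turns.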
def manhattan_distance(self):
"""Return the sum of the absolute values of E/W and N/S position."""
        distance = np.abs(self.current_position - self.initial_position)
"""
Copyright (c) 2021, <NAME>
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import traceback as tb
from dataclasses import dataclass
import re as re
import numpy as np
it_function_dict = {}
class Accumulator:
def __init__(self, accumulation_seed):
self.accumulation_seed = accumulation_seed
def append(self, element_or_substring):
if isinstance(self.accumulation_seed, str):
self.accumulation_seed += element_or_substring
elif isinstance(self.accumulation_seed, list):
self.accumulation_seed.append(element_or_substring)
return Accumulator(self.accumulation_seed)
class StringBuilder:
def __init__(self):
self.string = ''
def __new__(cls):
return Accumulator('')
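# Usage sketch (illustrative only): StringBuilder() actually hands back an
# Accumulator around an empty string, so appends can be chained:
#
#   s = StringBuilder().append("Py").append("Kot")
#   s.accumulation_seed   # -> "PyKot"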
class PyKot:
def __init__(self, variable, recall=False):
self.variable = variable
self.recall = recall
self.var = variable
def __repr__(self):
return str(self.variable)
def last_index(self): # lastIndex()
raise_type_error_if_merited("last_index()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(len(self.variable) - 1)
def drop(self, drop_from_front: int): # drop(n)
raise_type_error_if_merited("drop(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[drop_from_front:]
result = post_type_work(result, original_type)
return PyKot(result, True)
def drop_last(self, drop_from_back: int): # dropLast(n)
raise_type_error_if_merited("drop_last(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[:(len(self.variable) - drop_from_back)]
result = post_type_work(result, original_type)
return PyKot(result, True)
def drop_while(self, it_expression): # dropWhile(it expression)
raise_type_error_if_merited("drop_while(it expression)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
while it_expression.in_line_function(self.variable[0]):
self.variable = self.variable[1:]
result = post_type_work(self.variable, original_type)
return PyKot(result, True)
def drop_last_while(self, it_expression): # dropLastWhile(it expression)
raise_type_error_if_merited("drop_last_while(it expression)",
self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
while it_expression.in_line_function(self.variable[-1]):
self.variable = self.variable[:-1]
result = post_type_work(self.variable, original_type)
return PyKot(result, True)
def take(self, take_from_front: int): # take(n)
raise_type_error_if_merited("take(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[:take_from_front]
result = post_type_work(result, original_type)
return PyKot(result, True)
def take_last(self, take_from_back: int): # take_last(n)
raise_type_error_if_merited("take_last(Int)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
result = self.variable[len(self.variable) - take_from_back:]
result = post_type_work(result, original_type)
return PyKot(result, True)
def take_while(self, it_expression): # take_while(it expression)
raise_type_error_if_merited("take_while(it expression)", self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
if type_compliance(self.variable, str):
result = ''
while it_expression.in_line_function(self.variable[0]):
result += self.variable[0]
self.variable = self.variable[1:]
else:
result = []
while it_expression.in_line_function(self.variable[0]):
result.append(self.variable[0])
self.variable = self.variable[1:]
result = post_type_work(result, original_type)
return PyKot(result, True)
def take_last_while(self, it_expression): # take_last_while(it expression)
raise_type_error_if_merited("take_last_while(it expression)",
self.variable, str, list, tuple, type(np.array([])))
self.variable, original_type = pre_type_work(self.variable)
if type_compliance(self.variable, str):
result = ''
while it_expression.in_line_function(self.variable[-1]):
result += self.variable[-1]
self.variable = self.variable[:-1]
else:
result = []
while it_expression.in_line_function(self.variable[-1]):
result.append(self.variable[-1])
self.variable = self.variable[:-1]
result = post_type_work(result, original_type)
return PyKot(result, True)
def length(self): # length()
raise_type_error_if_merited("length()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(len(self.variable))
def first(self): # first()
raise_type_error_if_merited("first()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(self.variable[0], True)
def last(self): # last()
raise_type_error_if_merited("last()", self.variable, str, list, tuple, type(np.array([])))
return PyKot(self.variable[-1], True)
def trim_margin(self, margin="|"): # trimMargin(margin)
raise_type_error_if_merited("trim_margin(margin='|')", self.variable, str)
return PyKot(self.variable[(self.variable.find(margin) + len(margin)):], True)
def compare_to(self, comparison: str, ignorecase=False): # compareTo(String, ignorecase=False)
self.variable, original_type = pre_type_work(self.variable)
comparison, original_type_comparison = pre_type_work(comparison)
if type_compliance(self.variable, dict):
self.variable = tuple(self.variable)
if type_compliance(comparison, dict):
comparison = tuple(comparison)
if ignorecase:
self.variable = self.variable.lower()
comparison = comparison.lower()
original = [self.variable, comparison]
sort_compare = [self.variable, comparison]
sort_compare.sort()
sort_compare = -1 if sort_compare == original else 1
return PyKot(0 if self.variable == comparison else sort_compare)
def sub_string(self, first_index, second_index): # subString(i, j)
raise_type_error_if_merited("sub_string(Int, Int)", self.variable, str)
first_index, valid1, second_index, valid2 = unwrap_it(first_index, second_index)
if valid1:
first_index = first_index(self.variable)
if valid2:
second_index = second_index(self.variable)
return PyKot(self.variable[first_index: second_index], True)
def split(self, delimiter=' ', *additional_delimiters, ignorecase=False): # split(delimiter) or
# split(delimiter, ignorecase=True) or split(delimiter.toRegex()) or split(regex(delimiter))
raise_type_error_if_merited("split(delimiter=' ', *additional_delimiters, ignorecase=False)",
self.variable, str)
if ignorecase:
string = self.variable.lower()
delimiter_list = [delimiter.lower()] + [d.lower() for d in additional_delimiters]
else:
string = self.variable
delimiter_list = [delimiter] + [d for d in additional_delimiters]
if type_compliance(delimiter, type(re.compile(''))):
result = re.split(delimiter, self.variable)
else:
delimiter_indexes = []
found = 0
for delimiter in delimiter_list:
while found != -1 and (len(string) - found) >= len(delimiter):
found = string.find(delimiter, found, len(string) - 1)
if found == -1:
continue
delimiter_indexes.append(found)
found += len(delimiter)
delimiter_indexes.append(found)
found = 0
delimiter_indexes.append(0)
delimiter_indexes.sort()
delimiter_indexes.append(-1)
di = iter(delimiter_indexes)
delimiter_indexes = list(zip(di, di))
result = [self.variable[i:] if j == -1 else self.variable[i: j] for i, j in delimiter_indexes]
return PyKot(tuple(result), True)
def sub_sequence(self, first_index: int, second_index: int): # subSequence(i, j)
raise_type_error_if_merited("sub_string(Int, Int)", self.variable, str)
first_index, valid1, second_index, valid2 = unwrap_it(first_index, second_index)
if valid1:
first_index = first_index(self.variable)
if valid2:
second_index = second_index(self.variable)
return PyKot(self.variable[first_index: second_index], True)
def lines(self): # lines()
raise_type_error_if_merited("lines()", self.variable, str)
return PyKot(self.variable.splitlines(), True)
def capitalize(self): # capitalize()
raise_type_error_if_merited("capitalize()", self.variable, str)
return PyKot(self.variable.capitalize(), True)
def to_regex(self): # toRegex()
raise_type_error_if_merited("to_regex()", self.variable, str)
return re.compile(self.variable)
def replace(self, old_value: str, new_value: str, ignorecase=False): # replace(old, new, ignorecase=False)
raise_type_error_if_merited("replace(String, String, ignorecase=False)", self.variable, str)
if ignorecase:
find_index = self.variable.lower().find(old_value.lower())
if find_index == -1:
return PyKot(self.variable, True)
return PyKot(self.variable[:find_index] + new_value + self.variable[(find_index + len(old_value)):], True)
return PyKot(self.variable.replace(old_value, new_value), True)
def ends_with(self, substring): # endsWith(substring)
raise_type_error_if_merited("ends_with(String)", self.variable, str, list, tuple, type(np.array([])))
if type_compliance(self.variable, str):
result = True if self.variable[-len(substring):] == substring else False
else:
self.variable = unpack_array(self.variable)
result = True
for element in self.variable:
if not type_compliance(element, str):
raise TypeError("All elements in iterable must be a String to use ends_with()")
if result:
result = True if element[-len(substring):] == substring else False
return PyKot(result, True)
def plus(self, string_or_int): # plus(String) or plus(Int)
raise_type_error_if_merited("plus(String) or plus(Int)", self.variable, str, int)
if type_compliance(self.variable, str) and type_compliance(string_or_int, int):
string_or_int = str(string_or_int)
elif type_compliance(self.variable, int) and type_compliance(string_or_int, str):
string_or_int = int(string_or_int)
return PyKot(self.variable + string_or_int, True)
def get(self, index): # get()
raise_type_error_if_merited("get(Int)", self.variable, str, list, tuple, type(np.array([])), dict)
if isinstance(self.variable[index], type(np.array([1])[0])):
result = int(self.variable[index])
elif isinstance(self.variable[index], type(np.array([1.0])[0])):
result = float(self.variable[index])
elif isinstance(self.variable, dict):
result = self.variable[index] if index in self.variable.keys() else None
else:
result = self.variable[index]
return PyKot(result, True)
def to_string(self): # toString()
raise_type_error_if_merited("to_string()", self.variable, str, int, list, tuple, range, dict)
if isinstance(self.variable, str):
result = self.variable
else:
result = str(self.variable)
return PyKot(result, True)
def content_to_string(self): # contentToString()
raise_type_error_if_merited("content_to_string()", self.variable, list, tuple, type(np.array([])))
return PyKot(str([x for x in self.variable]), True)
def any(self, predicate=None): # any(predicate)
raise_type_error_if_merited("any(), any(value), or any(predicate)",
self.variable, list, tuple, dict, type(np.array([])))
result = unpack_array(self.variable)
if type_compliance(predicate, type(it())):
predicate = predicate.in_line_function
if type_compliance(self.variable, dict):
if not type_compliance(predicate, str, int):
result = True if len(list(filter(predicate, self.variable.items()))) > 0 else False
else:
if not type_compliance(predicate, str, int):
result = True if len(list(filter(predicate, result))) > 0 else False
if type_compliance(predicate, str, int):
if type_compliance(self.variable, dict):
result = True if predicate in self.variable.keys() else False
else:
result = True if predicate in self.variable else False
if predicate is None:
if self.variable:
result = True
else:
result = False
return PyKot(result, True)
def none(self): # any(predicate)
raise_type_error_if_merited("any(), any(value), or any(predicate)",
self.variable, list, tuple, dict, type(np.array([])))
return PyKot(False if unpack_array(self.variable) else True, True)
def to_list(self): # toList()
raise_type_error_if_merited("to_list()", self.variable, list, tuple, dict, type(np.array([])))
if type_compliance(self.variable, tuple):
result = self.variable
elif type_compliance(self.variable, dict):
result = tuple([(key, self.variable[key]) for key in self.variable.keys()])
else:
result = tuple(self.variable)
return PyKot(result, True)
def to_mutable_list(self): # toMutableList()
raise_type_error_if_merited("to_mutable_list()", self.variable, list, tuple, dict, type(np.array([])))
if isinstance(self.variable, tuple):
result = list(self.variable)
elif type_compliance(self.variable, dict):
result = [(key, self.variable[key]) for key in self.variable.keys()]
elif type_compliance(self.variable, type(np.array([]))):
result = [x for x in unpack_array(self.variable)]
else:
result = self.variable
return PyKot(result, True)
def contains(self, element): # contains(element)
raise_type_error_if_merited("contains()", self.variable, list, tuple, dict, type(np.array([])))
if isinstance(self.variable, dict):
return PyKot(element in self.variable.keys(), True)
return PyKot(element in self.variable, True)
def filter(self, predicate): # filter(predicate)
raise_type_error_if_merited("filter(function)", self.variable, list, tuple, dict, type(np.array([])))
predicate = predicate.in_line_function
if type_compliance(self.variable, dict):
new_map = dict(tuple(filter(predicate, self.variable.items())))
result = new_map
else:
result = list(filter(predicate, self.variable))
return PyKot(result, True)
def filter_not(self, predicate): # filterNot(predicate)
raise_type_error_if_merited("filter_not(function)", self.variable, list, tuple, dict, type(np.array([])))
predicate = predicate.in_line_function
if type_compliance(self.variable, dict):
new_map = {}
do_not_include = list(filter(predicate, self.variable.items()))
do_not_include = [x for x, y in do_not_include]
for key in self.variable.keys():
if key not in do_not_include:
new_map[key] = self.variable[key]
result = new_map
else:
new_list = []
do_not_include = list(filter(predicate, self.variable))
for value in [unpack_array_element(x) for x in self.variable]:
if value not in do_not_include:
new_list.append(value)
result = new_list
return PyKot(result, True)
def filter_indexed(self, predicate): # filter_indexed(predicate)
raise_type_error_if_merited("filter_indexed(predicate)", self.variable, list, tuple, type(np.array([])))
raise_type_error_if_merited("filter_indexed()", predicate, type(lambda x: x))
return PyKot([y for x, y in enumerate(unpack_array(self.variable)) if predicate(x, y)], True)
def filter_not_null(self): # filter_not_null()
raise_type_error_if_merited("filter_not_null()", self.variable, list, tuple, type(np.array([])))
return PyKot([x for x in unpack_array(self.variable) if x is not None])
def filter_is_instance(self, acceptable_type): # filter_is_instance(type)
raise_type_error_if_merited("filter_is_instance(acceptable_type)",
self.variable, list, tuple, type(np.array([])))
return PyKot([x for x in unpack_array(self.variable) if type(x) == acceptable_type])
def partition(self, predicate): # partition(predicate)
raise_type_error_if_merited("partition(predicate)", self.variable, list, tuple, type(np.array([])))
if type_compliance(predicate, type(it())):
predicate = predicate.in_line_function
match = []
rest = []
for element in unpack_array(self.variable):
if predicate(element):
match.append(element)
else:
rest.append(element)
return PyKot((tuple(match), tuple(rest)), True)
def for_each(self, *statements): # forEach( statements )
raise_type_error_if_merited("for_each(*statements)", self.variable, list, tuple, type( | np.array([]) | numpy.array |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm
from mo.utils.unittest.graph import build_graph
from mo.utils.ir_engine.compare_graphs import compare_graphs
nodes_attributes = {
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# ScaleShift layer
'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},
'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Mul and Add operations
'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Mul and Add operations
'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},
# Reshape
'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},
'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},
# BatchNorm operation
'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},
'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},
'bn_const': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},
'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},
'bn_mean': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},
'bn_var': {'value': None, 'shape': None, 'kind': 'data'},
'bn_data': {'value': None, 'shape': None, 'kind': 'data'},
# Concat1 operation
'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
'concat_data': {'value': None, 'shape': None, 'kind': 'data'},
'op_output': {'kind': 'op', 'op': 'Result'}
}
class ScaleShiftToMulAdd(unittest.TestCase):
# ScaleShift -> Mul
def test_scaleshift_to_mul_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift 2 inputs-> Mul
def test_scaleshift2_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift 2 inputs-> Mul (axis = 1)
def test_scaleshift2_axis1_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'scaleshift_1': {'axis': 1},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> Mul (Zero biases)
def test_scaleshift_to_mul_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> Mul->Add
def test_scaleshift_to_mul_add(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'mul_1_data'),
('mul_1_data', 'add_1'),
('const_add_1_w', 'add_1_w'),
('add_1_w', 'add_1'),
('add_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
'add_1': {'can_be_fused': True},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> None (Zero weights and biases)
def test_scaleshift_to_nothing(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
}, nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}
,nodes_with_edges_only=True)
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> ScaleShift (can_be_fused=False)
def test_scaleshift_can_be_fused(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1': {'can_be_fused': False},
'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1': {'can_be_fused': False},
'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
})
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
self.assertTrue(flag, resp)
class BatchNormDecomposition(unittest.TestCase):
def test_bn_decomposition_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'bn_op'),
('const_bn_const', 'bn_const'),
('const_bn_beta', 'bn_beta'),
('const_bn_mean', 'bn_mean'),
('const_bn_var', 'bn_var'),
('bn_const', 'bn_op'),
('bn_beta', 'bn_op'),
('bn_mean', 'bn_op'),
('bn_var', 'bn_op'),
('bn_op', 'bn_data'),
('concat', 'concat_data'),
('bn_data', 'concat'),
('concat_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'bn_op': {'eps': 1.2},
'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'bn_data': {'shape': np.array([1, 227, 227, 3])},
'concat_data': {}
}, nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'mul_1_data'),
('mul_1_data', 'add_1'),
('const_add_1_w', 'add_1_w'),
('add_1_w', 'add_1'),
('add_1', 'add_1_data'),
('add_1_data', 'mul_2'),
('const_mul_2_w', 'mul_2_w'),
('mul_2_w', 'mul_2'),
('mul_2', 'mul_2_data'),
('mul_2_data', 'add_2'),
('const_add_2_w', 'add_2_w'),
('add_2_w', 'add_2'),
('add_2', 'add_2_data'),
('concat', 'concat_data'),
('add_2_data', 'concat'),
('concat_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3])},
# LSTM (GRU) example: forecasting the KODEX200 index (2010 to present).
# Uses the KODEX200 closing price plus its 10-day and 40-day moving averages to predict the closing price over the next 10 days.
# Trains on the closing-price and moving-average patterns of the previous 20 days (step = 20).
# Can daily prices really be predicted??
#
# 2018.11.22, 아마추어퀀트 (조성현)
# --------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from MyUtil import YahooData
nInput = 3
nOutput = 3
nStep = 20
nNeuron = 50
# Build training batches from the 2-D time-series array.
# return : xBatch - RNN input
#          yBatch - RNN output
#
# If step = 2 and n = 3, then
# xData = [[1,2,3], [4,5,6], [7,8,9], [10,11,12], ...]
# xBatch = [[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]], ...]
# yBatch = [[[4,5,6], [7,8,9]], [[10,11,12], [13,14,15]], ...]
def createTrainData(xData, step, n=nInput):
m = np.arange(len(xData) - step)
np.random.shuffle(m)
x, y = [], []
for i in m:
a = xData[i:(i+step)]
x.append(a)
xBatch = np.reshape(np.array(x), (len(m), step, n))
for i in m+1:
a = xData[i:(i+step)]
y.append(a)
yBatch = np.reshape(np.array(y), (len(m), step, n))
return xBatch, yBatch
# Stock price data
#df = YahooData.getStockDataYahoo('^KS11', start='2007-01-01')
df = pd.read_csv('StockData/^KS11.csv', index_col=0, parse_dates=True)
df = pd.DataFrame(df['Close'])
df['ma_10'] = pd.DataFrame(df['Close']).rolling(window=10).mean()
df['ma_40'] = pd.DataFrame(df['Close']).rolling(window=40).mean()
df = df.dropna()
df = (df - df.mean()) / df.std()
# Build the training data.
data = np.array(df)
xBatch, yBatch = createTrainData(data, nStep)
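# (Added sanity check, illustrative) both batches share the shape
# (len(data) - nStep, nStep, nInput); yBatch is xBatch shifted forward by one time step.
assert xBatch.shape == yBatch.shape == (len(data) - nStep, nStep, nInput)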
# Build the RNN graph (Wx, Wh). xBatch is fed into the RNN.
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, nStep, nInput])
rnn = tf.nn.rnn_cell.LSTMCell(nNeuron)
#rnn = tf.nn.rnn_cell.GRUCell(nNeuron)
output, state = tf.nn.dynamic_rnn(rnn, x, dtype=tf.float32)
# Feed-forward network (Wy) that takes the RNN output and produces the 3 outputs y.
y = tf.placeholder(tf.float32, [None, nStep, nOutput])
inFC = tf.reshape(output, [-1, nNeuron])
fc1 = tf.contrib.layers.fully_connected(inputs=inFC, num_outputs=nNeuron)
predY = tf.contrib.layers.fully_connected(inputs=fc1, num_outputs=nOutput, activation_fn=None)
predY = tf.reshape(predY, [-1, nStep, nOutput])
# Define the loss as mean squared error (MSE): feeding xBatch should reproduce yBatch.
loss = tf.reduce_sum(tf.square(predY - y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
minLoss = optimizer.minimize(loss)
# Run the graph and train (updates Wx, Wh, Wy).
sess = tf.Session()
sess.run(tf.global_variables_initializer())
lossHist = []
for i in range(300):
sess.run(minLoss, feed_dict={x: xBatch, y: yBatch})
if i % 5 == 0:
ploss = sess.run(loss, feed_dict={x: xBatch, y: yBatch})
lossHist.append(ploss)
print(i, "\tLoss:", ploss)
# Predict the next 10 periods: forecast one period ahead, feed the prediction back in to forecast the next,
# and repeat this way out to 10 periods.
nFuture = 10
if len(data) > 100:
    lastData = np.copy(data[-100:])  # plot only the last 100 points of the original data
else:
lastData = np.copy(data)
dx = np.copy(lastData)
import types
import numpy as np
import pytest
import torch
from combine1d.core.cost_function import (get_indices_for_unknown_parameters,
get_known_parameters, create_cost_fct,
initialise_flowline,
initialise_mb_models, get_cost_terms,
cost_fct,
calculate_difference_between_observation_and_model,
define_reg_parameters, get_gradients,
do_height_shift_spinup)
from combine1d.core.first_guess import get_first_guess
from combine1d.core.dynamics import run_model_and_get_temporal_model_data
pytestmark = pytest.mark.filterwarnings("ignore:<class 'combine1d.core.torch_interp1d.Interp1d'> "
"should not be instantiated.:DeprecationWarning")
class TestCreateCostFct:
def test_get_known_parameters(self, data_logger_init):
data_logger = data_logger_init
known_parameters = get_known_parameters(data_logger)
fl = data_logger.flowline_init
for key in known_parameters.keys():
if 'area_bed_h' in data_logger.control_vars:
assert 'bed_h' in known_parameters
else:
assert key in data_logger.control_vars
if key in ['w0_m', 'lambdas']:
prefix = '_'
mask = (data_logger.is_trapezoid & data_logger.ice_mask)
elif key in ['bed_h']:
prefix = ''
mask = data_logger.ice_mask
elif key in ['area_bed_h']:
prefix = ''
mask = data_logger.ice_mask
key = 'bed_h'
elif key in ['surface_h']:
prefix = ''
mask = np.full(data_logger.ice_mask.shape, True)
assert np.allclose(known_parameters[key],
getattr(fl, prefix + key)[~mask],
equal_nan=True)
def test_create_indices_for_unknown_parameters(self, data_logger_init):
data_logger = data_logger_init
parameter_indices = get_indices_for_unknown_parameters(data_logger)
all_indices = np.array([])
for key in parameter_indices.keys():
assert key in data_logger.control_vars
all_indices = np.append(all_indices, parameter_indices[key])
# check that start with 0, every index is only present once, ends at len() - 1
values, counts = np.unique(all_indices, return_counts=True)
assert np.allclose(counts, 1)
assert np.min(values) == 0
        assert np.max(values) == len(all_indices) - 1
#!/bin/env python
def co2_emissions(yr, escheme):
from scipy.interpolate import interp1d
import numpy as np
## historical emissions
time = np.arange(1764, 2006, step=1)
emit_hist = [0,0.003,0.003,0.003,0.003,0.003,0.003,0.004,0.004,0.004,0.004,0.004,0.004,0.004,0.004,0.004,0.004,
0.005,0.005,0.005,0.005,0.005,0.005,0.005,0.005,0.005,0.005,0.006,0.006,0.006,0.006,0.006,0.006,0.007,
0.007,0.007,0.008,0.008,0.010,0.009,0.009,0.009,0.010,0.010,0.010,0.010,0.010,0.011,0.011,0.011,0.011,
0.012,0.013,0.014,0.014,0.014,0.014,0.014,0.015,0.016,0.016,0.017,0.017,0.018,0.018,0.018,0.024,0.023,
0.023,0.024,0.024,0.025,0.029,0.029,0.030,0.031,0.033,0.034,0.036,0.037,0.039,0.043,0.043,0.046,0.047,
0.050,0.054,0.054,0.057,0.059,0.069,0.071,0.076,0.077,0.078,0.083,0.091,0.095,0.097,0.104,0.112,0.119,
0.122,0.130,0.135,0.142,0.147,0.156,0.173,0.184,0.174,0.188,0.191,0.195,0.196,0.210,0.236,0.243,0.256,
0.272,0.275,0.277,0.281,0.295,0.327,0.327,0.356,0.372,0.374,0.370,0.383,0.406,0.419,0.440,0.465,0.507,
0.534,0.552,0.566,0.617,0.624,0.663,0.707,0.784,0.750,0.785,0.819,0.836,0.879,0.943,0.850,0.838,0.901,
0.955,0.936,0.806,0.932,0.803,0.845,0.970,0.963,0.975,0.983,1.062,1.065,1.145,1.053,0.940,0.847,0.893,
0.973,1.027,1.130,1.209,1.142,1.192,1.299,1.334,1.342,1.391,1.383,1.160,1.238,1.392,1.469,1.419,1.630,
1.768,1.796,1.841,1.865,2.043,2.178,2.270,2.330,2.462,2.577,2.594,2.700,2.848,3.008,3.145,3.305,3.411,
3.588,3.800,4.076,4.231,4.399,4.635,4.644,4.615,4.883,5.029,5.105,5.387,5.332,5.168,5.127,5.110,5.290,
5.444,5.610,5.753,5.964,6.089,6.144,6.235,6.118,6.124,6.242,6.372,6.510,6.619,6.588,6.569,6.735,6.896,
6.949,7.286,7.672,7.971]
if escheme == "rcp85":
time2 = np.arange(2006, 2101, step=1)
time = np.concatenate([time, time2])
emit_future = [8.162,8.352,8.543,8.735,8.926,9.187,9.448,9.709,9.970,10.232,10.493,10.754,11.015,
11.276,11.538,11.768,11.998,12.228,12.458,12.688,12.918,13.149,13.379,13.609,13.839,
14.134,14.429,14.723,15.018,15.313,15.608,15.902,16.197,16.492,16.787,17.128,17.470,
17.812,18.154,18.496,18.837,19.179,19.521,19.863,20.205,20.544,20.883,21.222,21.561,
21.900,22.240,22.579,22.918,23.257,23.596,23.833,24.069,24.306,24.543,24.779,25.016,
25.252,25.489,25.726,25.962,26.107,26.251,26.395,26.540,26.684,26.829,26.973,27.117,
27.262,27.406,27.499,27.592,27.685,27.778,27.871,27.964,28.058,28.151,28.244,28.337,
28.377,28.417,28.458,28.498,28.538,28.579,28.619,28.659,28.700,28.740]
emit = np.concatenate([emit_hist, emit_future])
elif escheme == "pulse":
time = time.transpose()
emit = time * 0
emit[time == 1800] = 10 # single year of pulsed emissions
else:
time = time.transpose()
emit = time * 0
#time = [-1e6, time, 1e6]
# time = np.array([-1e6, time, 1e6])
time = np.insert(time, 0, -1e6, axis=0)
time = np.append(time, 1e6)
# emit = [0, emit, emit[-1]]
emit = np.insert(emit, 0, 0, axis=0)
emit = np.append(emit, emit[-1])
# FF=interp1(time,emit,yr);
#FF = interp1d(time, emit, yr)
FF_fctn = interp1d(time, emit)
FF = FF_fctn(yr)
return(FF)
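# (Added usage sketch, not part of the original model)
#   co2_emissions(1800, "pulse")   # -> 10.0, the single-year pulse defined above
#   co2_emissions(2050, "rcp85")   # fossil-fuel emission rate on the RCP8.5 branch (Pg C/yr)
#   co2_emissions(2050, "none")    # -> 0.0, any unrecognised scheme gives zero emissions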
def calc_pco2(t, s, ta, c, phg):
'''
this function calculates the partial pressure of co2
'''
import numpy as np
pt = 0e-3
sit = 40.0e-3
tk = 273.15 + t
tk100 = tk / 100.0
tk1002 = tk100**2
invtk = 1.0 / tk
dlogtk = np.log(tk)
### note this variable has to change names since "is" is inbuilt in python
# is = 19.924*s./(1000.-1.005*s);
iss = 19.924 * s / (1000. - 1.005 * s)
# is2 =is.*is;
iss2 = iss**2
sqrtis = np.sqrt(iss)
s2 = s**2
sqrts = np.sqrt(s)
s15 = s ** 1.5
scl = s / 1.80655
fflocal = (np.exp(-162.8301 + 218.2968 / tk100 +
90.9241 * np.log(tk100) - 1.47696 * tk1002 +
s * (.025695 - .025225 * tk100 +
0.0049867 * tk1002)))
k0local = (np.exp(93.4517 / tk100 - 60.2409 +
23.3585 * np.log(tk100) +
s * (0.023517 - 0.023656 * tk100 +
0.0047036 * tk1002)))
k1local = 10**((-1 * (3670.7 * invtk -
62.008 + 9.7944 * dlogtk -
0.0118 * s + 0.000116 * s2)))
k2local = 10**(-1 * (1394.7 * invtk + 4.777 -
0.0184 * s + 0.000118 * s2))
kblocal = np.exp((-8966.90 - 2890.53 * sqrts - 77.942 * s +
1.728 * s15 - 0.0996 * s2) * invtk +
(148.0248 + 137.1942 * sqrts + 1.62142 * s) +
(-24.4344 - 25.085 * sqrts - 0.2474 * s) *
dlogtk + 0.053105 *sqrts * tk)
k1plocal = np.exp(-4576.752 * invtk + 115.525 -
18.453 * dlogtk +
(-106.736 * invtk + 0.69171) * sqrts +
(-0.65643 * invtk - 0.01844) * s)
k2plocal = np.exp(-8814.715 * invtk + 172.0883 -
27.927 * dlogtk +
(-160.340 * invtk + 1.3566) * sqrts +
(0.37335 * invtk - 0.05778) * s)
k3plocal = np.exp(-3070.75 * invtk - 18.141 +
(17.27039 * invtk + 2.81197) *
sqrts + (-44.99486 * invtk - 0.09984) * s)
ksilocal = np.exp(-8904.2 * invtk + 117.385 -
19.334 * dlogtk +
(-458.79 * invtk + 3.5913) * sqrtis +
(188.74 * invtk - 1.5998) * iss +
(-12.1652 * invtk + 0.07871) * iss2 +
np.log(1.0 - 0.001005 * s))
kwlocal = np.exp(-13847.26 * invtk + 148.9652 -
23.6521 * dlogtk +
(118.67 * invtk - 5.977 + 1.0495 * dlogtk) *
sqrts - 0.01615 * s)
kslocal = np.exp(-4276.1 * invtk + 141.328 -
23.093 * dlogtk +
(-13856 * invtk + 324.57 - 47.986 * dlogtk) *sqrtis +
(35474 * invtk - 771.54 + 114.723 * dlogtk) *iss -
2698 * invtk * iss**1.5 + 1776 * invtk * iss2 +
np.log(1.0 - 0.001005 * s))
kflocal = np.exp(1590.2 * invtk - 12.641 + 1.525 * sqrtis +
np.log(1.0 - 0.001005 * s) +
np.log(1.0 + (0.1400 / 96.062) * (scl) / kslocal))
btlocal = 0.000232 * scl/10.811
stlocal = 0.14 * scl/96.062
ftlocal = 0.000067 * scl/18.998
pHlocal = phg
permil =1.0 / 1024.5
pt = pt * permil
sit = sit * permil
ta = ta * permil
c = c * permil
####################
## start iteration ##
####################
phguess = pHlocal
hguess = 10.0**(-phguess)
bohg = btlocal*kblocal / (hguess + kblocal)
stuff = (hguess * hguess * hguess
+ (k1plocal * hguess * hguess)
+ (k1plocal * k2plocal * hguess)
+ (k1plocal * k2plocal * k3plocal))
h3po4g = (pt * hguess * hguess * hguess) / stuff
h2po4g = (pt * k1plocal * hguess * hguess) / stuff
hpo4g = (pt * k1plocal * k2plocal * hguess) / stuff
po4g = (pt * k1plocal * k2plocal * k3plocal) / stuff
siooh3g = sit * ksilocal / (ksilocal + hguess);
cag = (ta - bohg - (kwlocal / hguess) + hguess
- hpo4g - 2.0*po4g + h3po4g - siooh3g)
gamm = c / cag
hnew = (0.5 * (-k1local * (1 - gamm) + np.sqrt((k1local**2) * (1 - gamm)**2
+4 * k1local * k2local * (2 * gamm - 1) ) ))
pHlocal_new = -np.log10(hnew)
pHlocal = pHlocal_new
pco2local = (c / fflocal / (1.0 + (k1local / hnew) +
(k1local * k2local / (hnew**2))))
fflocal = fflocal / permil
return(pco2local, pHlocal, fflocal)
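# (Added usage sketch, illustrative values only; alkalinity and DIC follow the model's
# volumetric convention and are converted internally via `permil`)
#   pco2, ph, ff = calc_pco2(t=20.0, s=35.0, ta=2.35, c=2.10, phg=8.1)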
def get_matrix_index(arr_row_num, arr_col_num, row_ind, col_ind):
import numpy as np
pool_indices = []
element_nums = np.arange(0, 9*5).reshape(arr_col_num, arr_row_num).transpose()
for ind in range(0, len(row_ind)):
# print(element_nums[row_ind[ind], col_ind[0][ind]])
pool_indices.append(element_nums[row_ind[ind], col_ind[0][ind]])
return(pool_indices)
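# (Added worked example) element numbers are laid out column-major over the hard-wired
# 9x5 grid above, so element = 9*col + row:
#   get_matrix_index(9, 5, [0, 2], [[1, 0]])   # -> [9, 2]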
def carbon_climate_derivs(t, y, PE, PS, PL, PO):
'''
this is the main function for the box model
'''
import numpy as np
from scipy.interpolate import interp1d
#import seawater as sw
# added the necessary seawater functions to their own .py module
from seawater_functions import dens0, dens, seck, T68conv
Tloc = y[PE['Jtmp']].transpose()
Nloc = y[PE['Jnut']].transpose()
Dloc = y[PE['Jcoc']].transpose()
Cloc = y[PE['Jcla']]
patm = y[PE['Jatm']]
## special cases for ocean carbon pumps
# homogenize T,S if no solubility pump (for pCO2 only)
############################### NOTE: Need to add T from whatever dict it's coming from ####################################
if PS['DoOcn'] == 1:
Tsol = PO['T']
Ssol = PO['S']
if PS['DoOcnSol'] == 0:
Ttmp=Tsol.flatten()
Stmp=Ssol.flatten()
Tsol[0,PO['Isfc']] = np.sum(Ttmp[PO['Isfc']] * PO['A'][PO['Isfc']]) / np.sum(PO['A'][PO['Isfc']])
Ssol[0,PO['Isfc']] = np.sum(Stmp[PO['Isfc']] * PO['A'][PO['Isfc']]) / np.sum(PO['A'][PO['Isfc']])
# homogenize alkalinity if no bio pump
TAsol = PO['TA']
if PS['DoOcnBio'] == 0:
TAsol[PO['Isfc']] = np.sum(PO['TA'][PO['Isfc']] * PO['A'][PO['Isfc']]) / np.sum(PO['A'][PO['Isfc']])
## update basic quantities
# time
ymod = t / PE['spery'] # year in model time (starting from 0)
ycal = ymod - PS['yspin'] + PS['ypert'] # calendar year (negative means "BCE")
if ycal < PS['ypert']:
doAtm = 0 # hold atmospheric co2 constant to equilibrate
else:
doAtm = 1 # allow atmospheric co2 to evolve
# interp1d example
# matlab: interp1(x, y, xn, 'linear')
# python: yn_f2 = interp1d(x[::-1], y[::-1])
# python: yn_py2 = yn_f2(xn)
# atmosphere + climate
FF = co2_emissions(ycal, PS['escheme']) # fossil fuel co2 emissions (Pg/yr)
# [ycal FF]
FF = FF * 1e15 / 12 / PE['spery'] # convert to molC/s
RFco2 = 5.35 * np.log(patm / PE['patm0']) * PS['DoRadCO2'] # radiative forcing from CO2
RFsto=np.interp(round(ycal),PS['Yint'].transpose(), PS['RFint'].transpose())
RF = (RFco2 + np.nansum(RFsto)) * doAtm
dTbar = np.sum(Tloc[PO['Isfc']] * PO['A'][PO['Isfc']]) / np.sum(PO['A'][PO['Isfc']])
#------ terrestrial
NPPfac = 1 + np.interp(ycal,PS['Yint'].transpose(), PS['NPPint'].transpose())
NPP = PL['NPP_o'] * NPPfac * (1 + PS['CCC_LC'] * PL['beta_fert'] * np.log(patm / PE['patm0'])) # perturbation NPP
#krate = np.diag(PL['kbase']) * PL['Q10_resp']**(PS['CCC_LT'] * dTbar / 10) # scaled turnover rate
krate = PL['kbase'] * PL['Q10_resp']**(PS['CCC_LT'] * dTbar / 10) # scaled turnover rate (vector)
## create a matrix version of krate with values on the diagonal
    krate_diag = np.zeros((krate.shape[0], krate.shape[0]))
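    # (Added sketch) the source is cut off here; the comment above suggests the vector of
    # rates would next be placed on the diagonal, e.g.:
    # np.fill_diagonal(krate_diag, krate)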
import numpy as np
from astropy.tests.helper import pytest
from stingray import Lightcurve
from stingray import Powerspectrum, AveragedPowerspectrum, \
DynamicalPowerspectrum
from stingray.powerspectrum import classical_pvalue
np.random.seed(20150907)
class TestPowerspectrum(object):
@classmethod
def setup_class(cls):
tstart = 0.0
tend = 1.0
dt = 0.0001
time = np.arange(tstart + 0.5*dt, tend + 0.5*dt, dt)
mean_count_rate = 100.0
mean_counts = mean_count_rate * dt
poisson_counts = np.random.poisson(mean_counts,
size=time.shape[0])
cls.lc = Lightcurve(time, counts=poisson_counts, dt=dt,
gti=[[tstart, tend]])
def test_make_empty_periodogram(self):
ps = Powerspectrum()
assert ps.norm == "frac"
assert ps.freq is None
assert ps.power is None
assert ps.power_err is None
assert ps.df is None
assert ps.m == 1
assert ps.n is None
def test_make_periodogram_from_lightcurve(self):
ps = Powerspectrum(lc=self.lc)
assert ps.freq is not None
assert ps.power is not None
assert ps.power_err is not None
assert ps.df == 1.0 / self.lc.tseg
assert ps.norm == "frac"
assert ps.m == 1
assert ps.n == self.lc.time.shape[0]
assert ps.nphots == np.sum(self.lc.counts)
def test_periodogram_types(self):
ps = Powerspectrum(lc=self.lc)
assert isinstance(ps.freq, np.ndarray)
assert isinstance(ps.power, np.ndarray)
assert isinstance(ps.power_err, np.ndarray)
def test_init_with_lightcurve(self):
assert Powerspectrum(self.lc)
def test_init_without_lightcurve(self):
with pytest.raises(TypeError):
assert Powerspectrum(self.lc.counts)
def test_init_with_nonsense_data(self):
nonsense_data = [None for i in range(100)]
with pytest.raises(TypeError):
assert Powerspectrum(nonsense_data)
def test_init_with_nonsense_norm(self):
nonsense_norm = "bla"
with pytest.raises(ValueError):
assert Powerspectrum(self.lc, norm=nonsense_norm)
def test_init_with_wrong_norm_type(self):
nonsense_norm = 1.0
with pytest.raises(TypeError):
assert Powerspectrum(self.lc, norm=nonsense_norm)
def test_total_variance(self):
"""
the integral of powers (or Riemann sum) should be close
to the variance divided by twice the length of the light curve.
Note: make sure the factors of ncounts match!
Also, make sure to *exclude* the zeroth power!
"""
ps = Powerspectrum(lc=self.lc)
nn = ps.n
        pp = ps.unnorm_power / float(nn) ** 2
p_int = np.sum(pp[:-1] * ps.df) + (pp[-1] * ps.df) / 2
var_lc = np.var(self.lc.counts) / (2. * self.lc.tseg)
assert np.isclose(p_int, var_lc, atol=0.01, rtol=0.01)
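        # (Added note) this is Parseval's relation for the unnormalised periodogram:
        #   sum_j |X_j|^2 / N^2 * df  ~=  Var(counts) / (2 * T_seg),
        # which is what the assertion above checks to ~1% tolerance.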
def test_frac_normalization_is_standard(self):
"""
Make sure the standard normalization of a periodogram is
rms and it stays that way!
"""
ps = Powerspectrum(lc=self.lc)
assert ps.norm == "frac"
def test_frac_normalization_correct(self):
"""
In fractional rms normalization, the integral of the powers should be
equal to the variance of the light curve divided by the mean
of the light curve squared.
"""
ps = Powerspectrum(lc=self.lc, norm="frac")
ps_int = np.sum(ps.power[:-1] * ps.df) + ps.power[-1] * ps.df / 2
std_lc = np.var(self.lc.counts) / np.mean(self.lc.counts) ** 2
assert np.isclose(ps_int, std_lc, atol=0.01, rtol=0.01)
def test_fractional_rms_in_frac_norm_is_consistent(self):
time = np.arange(0, 100, 1) + 0.5
poisson_counts = np.random.poisson(100.0,
size=time.shape[0])
lc = Lightcurve(time, counts=poisson_counts, dt=1,
gti=[[0, 100]])
ps = Powerspectrum(lc=lc, norm="leahy")
rms_ps_l, rms_err_l = ps.compute_rms(min_freq=ps.freq[1],
max_freq=ps.freq[-1], white_noise_offset=0)
ps = Powerspectrum(lc=lc, norm="frac")
rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1],
max_freq=ps.freq[-1], white_noise_offset=0)
assert np.allclose(rms_ps, rms_ps_l, atol=0.01)
assert np.allclose(rms_err, rms_err_l, atol=0.01)
def test_fractional_rms_in_frac_norm_is_consistent_averaged(self):
time = np.arange(0, 400, 1) + 0.5
poisson_counts = np.random.poisson(100.0,
size=time.shape[0])
lc = Lightcurve(time, counts=poisson_counts, dt=1,
gti=[[0, 400]])
ps = AveragedPowerspectrum(lc=lc, norm="leahy", segment_size=100)
rms_ps_l, rms_err_l = ps.compute_rms(min_freq=ps.freq[1],
max_freq=ps.freq[-1], white_noise_offset=0)
ps = AveragedPowerspectrum(lc=lc, norm="frac", segment_size=100)
rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1],
max_freq=ps.freq[-1], white_noise_offset=0)
assert np.allclose(rms_ps, rms_ps_l, atol=0.01)
assert np.allclose(rms_err, rms_err_l, atol=0.01)
def test_fractional_rms_in_frac_norm(self):
time = np.arange(0, 400, 1) + 0.5
poisson_counts = np.random.poisson(100.0,
size=time.shape[0])
lc = Lightcurve(time, counts=poisson_counts, dt=1,
gti=[[0, 400]])
ps = AveragedPowerspectrum(lc=lc, norm="frac", segment_size=100)
rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1],
max_freq=ps.freq[-1],
white_noise_offset=0)
rms_lc = np.std(lc.counts) / np.mean(lc.counts)
assert np.isclose(rms_ps, rms_lc, atol=0.01)
def test_leahy_norm_Poisson_noise(self):
"""
In Leahy normalization, the poisson noise level (so, in the absence of
a signal, the average power) should be equal to 2.
"""
        time = np.linspace(0, 10.0, int(1e5))
counts = np.random.poisson(1000, size=time.shape[0])
lc = Lightcurve(time, counts)
ps = Powerspectrum(lc, norm="leahy")
assert np.isclose(np.mean(ps.power[1:]), 2.0, atol=0.01, rtol=0.01)
def test_leahy_norm_total_variance(self):
"""
In Leahy normalization, the total variance should be the sum of
powers multiplied by the number of counts and divided by the
square of the number of data points in the light curve
"""
ps = Powerspectrum(lc=self.lc, norm="Leahy")
ps_var = (np.sum(self.lc.counts) / ps.n ** 2.) * \
(np.sum(ps.power[:-1]) + ps.power[-1] / 2.)
assert np.isclose(ps_var, np.var(self.lc.counts), atol=0.01)
def test_fractional_rms_in_leahy_norm(self):
"""
fractional rms should only be *approximately* equal the standard
deviation divided by the mean of the light curve. Therefore, we allow
for a larger tolerance in np.isclose()
"""
ps = Powerspectrum(lc=self.lc, norm="Leahy")
rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0],
max_freq=ps.freq[-1])
rms_lc = np.std(self.lc.counts) / np.mean(self.lc.counts)
assert np.isclose(rms_ps, rms_lc, atol=0.01)
def test_fractional_rms_fails_when_rms_not_leahy(self):
with pytest.raises(Exception):
ps = Powerspectrum(lc=self.lc, norm="rms")
rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0],
max_freq=ps.freq[-1])
def test_abs_norm_Poisson_noise(self):
"""
Poisson noise level for a light curve with absolute rms-squared
normalization should be approximately 2 * the mean count rate of the
light curve.
"""
        time = np.linspace(0, 1., int(1e4))
counts = np.random.poisson(0.01, size=time.shape[0])
lc = Lightcurve(time, counts)
ps = Powerspectrum(lc, norm="abs")
print(lc.counts/lc.tseg)
abs_noise = 2. * 100 # expected Poisson noise level;
# hardcoded value from above
print(np.mean(ps.power[1:]), abs_noise)
assert np.isclose(np.mean(ps.power[1:]), abs_noise, atol=30)
def test_fractional_rms_error(self):
"""
TODO: Need to write a test for the fractional rms error.
But I don't know how!
"""
pass
def test_rebin_makes_right_attributes(self):
ps = Powerspectrum(lc=self.lc, norm="Leahy")
# replace powers
ps.power = np.ones_like(ps.power) * 2.0
rebin_factor = 2
bin_ps = ps.rebin(rebin_factor*ps.df)
assert bin_ps.freq is not None
assert bin_ps.power is not None
assert bin_ps.power is not None
assert bin_ps.df == rebin_factor * 1.0 / self.lc.tseg
assert bin_ps.norm.lower() == "leahy"
assert bin_ps.m == 2
assert bin_ps.n == self.lc.time.shape[0]
assert bin_ps.nphots == np.sum(self.lc.counts)
def test_rebin_uses_mean(self):
"""
Make sure the rebin-method uses "mean" to average instead of summing
powers by default, and that this is not changed in the future!
Note: function defaults come as a tuple, so the first keyword argument
had better be 'method'
"""
ps = Powerspectrum(lc=self.lc, norm="Leahy")
assert ps.rebin.__defaults__[2] == "mean"
@pytest.mark.parametrize('df', [2, 3, 5, 1.5, 1, 85])
def test_rebin(self, df):
"""
TODO: Not sure how to write tests for the rebin method!
"""
ps = Powerspectrum(lc=self.lc, norm="Leahy")
bin_ps = ps.rebin(df)
assert np.isclose(bin_ps.freq[1] - bin_ps.freq[0], bin_ps.df,
atol=1e-4, rtol=1e-4)
assert np.isclose(bin_ps.freq[0],
(ps.freq[0] - ps.df * 0.5 + bin_ps.df * 0.5),
atol=1e-4, rtol=1e-4)
def test_classical_significances_runs(self):
ps = Powerspectrum(lc=self.lc, norm="Leahy")
ps.classical_significances()
def test_classical_significances_fails_in_rms(self):
ps = Powerspectrum(lc=self.lc, norm="frac")
with pytest.raises(ValueError):
ps.classical_significances()
def test_classical_significances_threshold(self):
ps = Powerspectrum(lc=self.lc, norm="leahy")
# change the powers so that just one exceeds the threshold
ps.power = np.zeros_like(ps.power) + 2.0
index = 1
ps.power[index] = 10.0
threshold = 0.01
pval = ps.classical_significances(threshold=threshold,
trial_correction=False)
assert pval[0, 0] < threshold
assert pval[1, 0] == index
def test_classical_significances_trial_correction(self):
ps = Powerspectrum(lc=self.lc, norm="leahy")
# change the powers so that just one exceeds the threshold
ps.power = np.zeros_like(ps.power) + 2.0
index = 1
ps.power[index] = 10.0
threshold = 0.01
pval = ps.classical_significances(threshold=threshold,
trial_correction=True)
assert np.size(pval) == 0
def test_classical_significances_with_logbinned_psd(self):
ps = Powerspectrum(lc=self.lc, norm="leahy")
ps_log = ps.rebin_log()
pval = ps_log.classical_significances(threshold=1.1, trial_correction=False)
assert len(pval[0]) == len(ps_log.power)
def test_pvals_is_numpy_array(self):
ps = Powerspectrum(lc=self.lc, norm="leahy")
# change the powers so that just one exceeds the threshold
ps.power = np.zeros_like(ps.power) + 2.0
index = 1
ps.power[index] = 10.0
threshold = 1.0
pval = ps.classical_significances(threshold=threshold,
trial_correction=True)
assert isinstance(pval, np.ndarray)
assert pval.shape[0] == 2
class TestAveragedPowerspectrum(object):
@classmethod
def setup_class(cls):
tstart = 0.0
tend = 10.0
dt = 0.0001
time = np.arange(tstart + 0.5*dt, tend + 0.5*dt, dt)
mean_count_rate = 1000.0
mean_counts = mean_count_rate * dt
poisson_counts = np.random.poisson(mean_counts,
size=time.shape[0])
cls.lc = Lightcurve(time, counts=poisson_counts, gti=[[tstart, tend]],
dt=dt)
def test_one_segment(self):
segment_size = self.lc.tseg
ps = AveragedPowerspectrum(self.lc, segment_size)
assert np.isclose(ps.segment_size, segment_size)
def test_make_empty_periodogram(self):
ps = AveragedPowerspectrum()
assert ps.norm == "frac"
assert ps.freq is None
assert ps.power is None
assert ps.power_err is None
assert ps.df is None
assert ps.m == 1
assert ps.n is None
@pytest.mark.parametrize('nseg', [1, 2, 3, 5, 10, 20, 100])
def test_n_segments(self, nseg):
segment_size = self.lc.tseg/nseg
ps = AveragedPowerspectrum(self.lc, segment_size)
assert ps.m == nseg
def test_segments_with_leftover(self):
segment_size = self.lc.tseg / 2. - 1.
ps = AveragedPowerspectrum(self.lc, segment_size)
assert np.isclose(ps.segment_size, segment_size)
assert ps.m == 2
def test_init_without_segment(self):
with pytest.raises(ValueError):
assert AveragedPowerspectrum(self.lc)
def test_init_with_nonsense_segment(self):
segment_size = "foo"
with pytest.raises(TypeError):
assert AveragedPowerspectrum(self.lc, segment_size)
def test_init_with_none_segment(self):
segment_size = None
with pytest.raises(ValueError):
assert AveragedPowerspectrum(self.lc, segment_size)
def test_init_with_inf_segment(self):
segment_size = np.inf
with pytest.raises(ValueError):
assert AveragedPowerspectrum(self.lc, segment_size)
def test_init_with_nan_segment(self):
segment_size = np.nan
with pytest.raises(ValueError):
assert AveragedPowerspectrum(self.lc, segment_size)
def test_list_of_light_curves(self):
n_lcs = 10
tstart = 0.0
tend = 1.0
dt = 0.0001
time = np.arange(tstart + 0.5*dt, tend + 0.5*dt, dt)
mean_count_rate = 1000.0
mean_counts = mean_count_rate * dt
lc_all = []
for n in range(n_lcs):
poisson_counts = np.random.poisson(mean_counts,
size=len(time))
lc = Lightcurve(time, counts=poisson_counts, gti=[[tstart, tend]],
dt=dt)
lc_all.append(lc)
segment_size = 0.5
assert AveragedPowerspectrum(lc_all, segment_size)
@pytest.mark.parametrize('df', [2, 3, 5, 1.5, 1, 85])
def test_rebin(self, df):
"""
TODO: Not sure how to write tests for the rebin method!
"""
aps = AveragedPowerspectrum(lc=self.lc, segment_size=1,
norm="Leahy")
bin_aps = aps.rebin(df)
assert np.isclose(bin_aps.freq[1]-bin_aps.freq[0], bin_aps.df,
atol=1e-4, rtol=1e-4)
assert np.isclose(bin_aps.freq[0],
(aps.freq[0]-aps.df*0.5+bin_aps.df*0.5),
atol=1e-4, rtol=1e-4)
@pytest.mark.parametrize('f', [20, 30, 50, 15, 1, 850])
def test_rebin_factor(self, f):
"""
TODO: Not sure how to write tests for the rebin method!
"""
aps = AveragedPowerspectrum(lc=self.lc, segment_size=1,
norm="Leahy")
bin_aps = aps.rebin(f=f)
assert np.isclose(bin_aps.freq[1]-bin_aps.freq[0], bin_aps.df,
atol=1e-4, rtol=1e-4)
assert np.isclose(bin_aps.freq[0],
(aps.freq[0]-aps.df*0.5+bin_aps.df*0.5),
atol=1e-4, rtol=1e-4)
@pytest.mark.parametrize('df', [0.01, 0.1])
def test_rebin_log(self, df):
# For now, just verify that it doesn't crash
aps = AveragedPowerspectrum(lc=self.lc, segment_size=1,
norm="Leahy")
bin_aps = aps.rebin_log(df)
def test_rebin_with_invalid_type_attribute(self):
new_df = 2
aps = AveragedPowerspectrum(lc=self.lc, segment_size=1,
norm='leahy')
aps.type = 'invalid_type'
with pytest.raises(AttributeError):
assert aps.rebin(df=new_df)
def test_list_with_nonsense_component(self):
n_lcs = 10
tstart = 0.0
tend = 1.0
dt = 0.0001
time = np.linspace(tstart, tend, int((tend - tstart) / dt))
mean_count_rate = 1000.0
mean_counts = mean_count_rate * dt
lc_all = []
for n in range(n_lcs):
poisson_counts = np.random.poisson(mean_counts,
size=len(time))
lc = Lightcurve(time, counts=poisson_counts)
lc_all.append(lc)
lc_all.append(1.0)
segment_size = 0.5
with pytest.raises(TypeError):
assert AveragedPowerspectrum(lc_all, segment_size)
def test_leahy_correct_for_multiple(self):
n = 10
lc_all = []
for i in range(n):
time = np.arange(0.0, 10.0, 10. / 10000)
counts = np.random.poisson(1000, size=time.shape[0])
lc = Lightcurve(time, counts)
lc_all.append(lc)
ps = AveragedPowerspectrum(lc_all, 1.0, norm="leahy")
assert np.isclose(np.mean(ps.power), 2.0, atol=1e-2, rtol=1e-2)
assert np.isclose(np.std(ps.power), 2.0 / np.sqrt(n*10), atol=0.1,
rtol=0.1)
class TestClassicalSignificances(object):
def test_function_runs(self):
power = 2.0
nspec = 1.0
classical_pvalue(power, nspec)
def test_power_is_not_infinite(self):
power = np.inf
nspec = 1
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_power_is_not_infinite2(self):
power = -np.inf
nspec = 1
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_power_is_non_nan(self):
power = np.nan
nspec = 1
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_power_is_positive(self):
power = -2.0
nspec = 1.0
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_nspec_is_not_infinite(self):
power = 2.0
nspec = np.inf
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_nspec_is_not_infinite2(self):
power = 2.0
nspec = -np.inf
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_nspec_is_not_nan(self):
power = 2.0
nspec = np.nan
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_nspec_is_positive(self):
power = 2.0
nspec = -1.0
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_nspec_is_nonzero(self):
power = 2.0
nspec = 0.0
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_nspec_is_an_integer_number(self):
power = 2.0
nspec = 2.5
with pytest.raises(ValueError):
classical_pvalue(power, nspec)
def test_nspec_float_type_okay(self):
power = 2.0
nspec = 2.0
classical_pvalue(power, nspec)
def test_pvalue_decreases_with_increasing_power(self):
power1 = 2.0
power2 = 20.0
nspec = 1.0
pval1 = classical_pvalue(power1, nspec)
pval2 = classical_pvalue(power2, nspec)
assert pval1 - pval2 > 0.0
def test_pvalue_must_decrease_with_increasing_nspec(self):
power = 3.0
nspec1 = 1.0
nspec2 = 10.0
pval1 = classical_pvalue(power, nspec1)
pval2 = classical_pvalue(power, nspec2)
assert pval1 - pval2 > 0.0
def test_very_large_powers_produce_zero_prob(self):
power = 31000.0
nspec = 1
pval = classical_pvalue(power, nspec)
assert np.isclose(pval, 0.0)
class TestDynamicalPowerspectrum(object):
def setup_class(cls):
# generate timestamps
timestamps = np.linspace(1, 100, 10000)
freq = 25 + 1.2 * np.sin(2 * np.pi * timestamps / 130)
        # variability signal with drifting frequency
vari = 25 * np.sin(2 * np.pi * freq * timestamps)
signal = vari + 50
# create a lightcurve
lc = Lightcurve(timestamps, signal, err_dist='gauss')
cls.lc = lc
        # Simple lc to demonstrate rebinning of dyn ps
test_times = np.arange(16)
test_counts = [2, 3, 1, 3, 1, 5, 2, 1, 4, 2, 2, 2, 3, 4, 1, 7]
cls.lc_test = Lightcurve(test_times, test_counts)
def test_with_short_seg_size(self):
with pytest.raises(ValueError):
dps = DynamicalPowerspectrum(self.lc, segment_size=0)
def test_with_long_seg_size(self):
with pytest.raises(ValueError):
dps = DynamicalPowerspectrum(self.lc, segment_size=1000)
def test_matrix(self):
dps = DynamicalPowerspectrum(self.lc, segment_size=3)
nsegs = int(self.lc.tseg / dps.segment_size)
nfreq = int((1 / self.lc.dt) / (2 * (dps.freq[1] - dps.freq[0])) -
(1 / self.lc.tseg))
assert dps.dyn_ps.shape == (nfreq, nsegs)
def test_trace_maximum_without_boundaries(self):
dps = DynamicalPowerspectrum(self.lc, segment_size=3)
max_pos = dps.trace_maximum()
assert np.max(dps.freq[max_pos]) <= 1 / self.lc.dt
assert np.min(dps.freq[max_pos]) >= 1 / dps.segment_size
def test_trace_maximum_with_boundaries(self):
dps = DynamicalPowerspectrum(self.lc, segment_size=3)
minfreq = 21
maxfreq = 24
max_pos = dps.trace_maximum(min_freq=minfreq, max_freq=maxfreq)
        assert np.max(dps.freq[max_pos]) <= maxfreq
import numpy
import sys
def local_energy_generic(h1e, eri, G, ecore=0.0, Ghalf=None):
r"""Calculate local for generic two-body hamiltonian.
This uses the full form for the two-electron integrals.
For testing purposes only.
Parameters
----------
    h1e : :class:`numpy.ndarray`
        One-body Hamiltonian matrix elements for each spin sector.
    eri : :class:`numpy.ndarray`
        Two-electron integrals in full four-index form.
    G : :class:`numpy.ndarray`
        Walker's "green's function"
Returns
-------
(E, T, V): tuple
Local, kinetic and potential energies.
"""
e1 = (numpy.einsum('ij,ij->', h1e[0], G[0]) +
numpy.einsum('ij,ij->', h1e[1], G[1]))
euu = 0.5*(numpy.einsum('prqs,pr,qs->', eri, G[0], G[0]) -
numpy.einsum('prqs,ps,qr->', eri, G[0], G[0]))
edd = 0.5*(numpy.einsum('prqs,pr,qs->', eri, G[1], G[1]) -
numpy.einsum('prqs,ps,qr->', eri, G[1], G[1]))
eud = 0.5*numpy.einsum('prqs,pr,qs->', eri, G[0], G[1])
edu = 0.5*numpy.einsum('prqs,pr,qs->', eri, G[1], G[0])
e2 = euu + edd + eud + edu
return (e1+e2+ecore, e1+ecore, e2)
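# (Added note) in index notation the two-body energy assembled above is
#   E2 = 1/2 * sum_{prqs} v_{prqs} [ (G^a_{pr} + G^b_{pr}) (G^a_{qs} + G^b_{qs})
#                                    - G^a_{ps} G^a_{qr} - G^b_{ps} G^b_{qr} ],
# i.e. the full Coulomb (Hartree) contribution minus same-spin exchange.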
def local_energy_generic_pno(system, G, Ghalf=None, eri=None, C0=None,\
ecoul0 = None, exxa0 = None, exxb0 = None, UVT=None):
na = system.nup
nb = system.ndown
M = system.nbasis
UVT_aa = UVT[0]
UVT_bb = UVT[1]
UVT_ab = UVT[2]
Ga, Gb = Ghalf[0], Ghalf[1]
# Element wise multiplication.
e1b = numpy.sum(system.H1[0]*G[0]) + numpy.sum(system.H1[1]*G[1])
eJaa = 0.0
eKaa = 0.0
if (len(C0.shape) == 3):
CT = C0[0,:,:].T
else:
CT = C0[:,:].T
GTa = CT[:na,:] # hard-coded to do single slater
GTb = CT[na:,:] # hard-coded to do single slater
for (i,j),(U,VT) in zip(system.ij_list_aa, UVT_aa):
if (i == j):
c = 0.5
else:
c = 1.0
theta_i = Ga[i,:]
theta_j = Ga[j,:]
thetaT_i = GTa[i,:]
thetaT_j = GTa[j,:]
thetaU = numpy.einsum("p,pk->k", theta_i,U)
thetaV = numpy.einsum("p,kp->k", theta_j,VT)
thetaTU = numpy.einsum("p,pk->k", thetaT_i,U)
thetaTV = numpy.einsum("p,kp->k", thetaT_j,VT)
eJaa += c * (numpy.dot(thetaU, thetaV) - numpy.dot(thetaTU, thetaTV))
thetaU = numpy.einsum("p,pk->k", theta_j,U)
thetaV = numpy.einsum("p,kp->k", theta_i,VT)
thetaTU = numpy.einsum("p,pk->k", thetaT_j,U)
thetaTV = numpy.einsum("p,kp->k", thetaT_i,VT)
eKaa -= c * (numpy.dot(thetaU, thetaV) - numpy.dot(thetaTU, thetaTV))
eJbb = 0.0
eKbb = 0.0
for (i,j),(U,VT) in zip(system.ij_list_bb, UVT_bb):
if (i == j):
c = 0.5
else:
c = 1.0
theta_i = Gb[i,:]
theta_j = Gb[j,:]
thetaT_i = GTb[i,:]
thetaT_j = GTb[j,:]
thetaU = numpy.einsum("p,pk->k", theta_i,U)
thetaV = numpy.einsum("p,kp->k", theta_j,VT)
thetaTU = numpy.einsum("p,pk->k", thetaT_i,U)
thetaTV = numpy.einsum("p,kp->k", thetaT_j,VT)
eJbb += c * (numpy.dot(thetaU, thetaV) - numpy.dot(thetaTU, thetaTV))
thetaU = numpy.einsum("p,pk->k", theta_j,U)
thetaV = numpy.einsum("p,kp->k", theta_i,VT)
thetaTU = numpy.einsum("p,pk->k", thetaT_j,U)
thetaTV = numpy.einsum("p,kp->k", thetaT_i,VT)
eKbb -= c * (numpy.dot(thetaU, thetaV) - numpy.dot(thetaTU, thetaTV))
eJab = 0.0
for (i,j),(U,VT) in zip(system.ij_list_ab, UVT_ab):
theta_i = Ga[i,:]
theta_j = Gb[j,:]
thetaT_i = GTa[i,:]
thetaT_j = GTb[j,:]
thetaU = numpy.einsum("p,pk->k", theta_i,U)
thetaV = numpy.einsum("p,kp->k", theta_j,VT)
thetaTU = numpy.einsum("p,pk->k", thetaT_i,U)
thetaTV = numpy.einsum("p,kp->k", thetaT_j,VT)
eJab += (numpy.dot(thetaU, thetaV) - numpy.dot(thetaTU, thetaTV))
e2b = 0.5*(ecoul0 - exxa0 - exxb0) + eJaa + eJbb + eJab + eKaa + eKbb
return (e1b + e2b + system.ecore, e1b + system.ecore, e2b)
def local_energy_generic_opt(system, G, Ghalf=None, eri=None):
na = system.nup
nb = system.ndown
M = system.nbasis
vipjq_aa = eri[0,:na**2*M**2].reshape((na,M,na,M))
vipjq_bb = eri[0,na**2*M**2:na**2*M**2+nb**2*M**2].reshape((nb,M,nb,M))
vipjq_ab = eri[0,na**2*M**2+nb**2*M**2:].reshape((na,M,nb,M))
Ga, Gb = Ghalf[0], Ghalf[1]
# Element wise multiplication.
e1b = numpy.sum(system.H1[0]*G[0]) + numpy.sum(system.H1[1]*G[1])
# Coulomb
    eJaa = 0.5 * numpy.einsum("irjs,ir,js", vipjq_aa, Ga, Ga)
from __future__ import division, print_function, unicode_literals
import sys
import cv2
import numpy as np
from scipy.signal import argrelextrema
from matplotlib import pyplot as plt
import model_handler
from localization_cnn.constants import (
HEIGHT,
WIDTH,
TRAINED_MODELS,
MODEL)
HEAT_MAP_THRESH = 125
SLIDE = 8
class ROI:
def __init__(self, left, right, top, bottom):
self.left = left
self.right = right
self.bottom = bottom
self.top = top
self.width = self.right - self.left
def get_x_range(self, width, slide):
return range(self.left, self.right - width, slide)
def get_y_range(self, height, slide):
return range(self.top, self.bottom - height, slide)
class Localizer:
def __init__(self, model, img):
self.width = WIDTH
        self.height = HEIGHT
self.slide = SLIDE
self.img = self.normalize_images(img, 1300)
self.img_h, self.img_w = self.img.shape[:2]
self.model = model
self.top = 0
self.left = 0
self.right = self.img_w
self.bottom = self.img_h
self.roi = ROI(self.left, self.right, self.top, self.bottom)
def process(self, debug=False):
gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
windows, offsets = self._get_windows(gray)
predictions = self._predict(windows)
heat_map = self._calculate_heat_map(predictions, offsets)
_, thresh = cv2.threshold(heat_map, HEAT_MAP_THRESH, 255, cv2.THRESH_TOZERO)
x1, x2 = self._get_x_range_all(thresh)
y_peaks = self._get_y_response(thresh) # list of list
fields = self._get_fields(self.img, thresh, y_peaks)
if debug:
merge = np.ones((600, 800, 3), dtype=np.uint8) * 255
cur_y = 20
for f in fields:
merge[cur_y:cur_y+f.shape[0], 20:20+f.shape[1]] = f
cur_y += f.shape[0] + 10
cv2.imshow("Merge", merge)
cv2.imshow("Thresh", thresh[:, x1:x2])
cv2.waitKey(0)
return fields
def normalize_images(self, img, norm_w):
if img is None:
            sys.exit(1)
height, width = img.shape[:2]
target_height = round((norm_w / width) * height)
img_res = cv2.resize(src=img, dsize=(norm_w, target_height), interpolation=cv2.INTER_CUBIC)
return img_res
def _get_windows(self, img):
windows = []
offsets = []
for y_offset in self.roi.get_y_range(self.height, self.slide):
for x_offset in self.roi.get_x_range(self.width, self.slide):
img_window = img[y_offset: y_offset + self.height,
x_offset: x_offset + self.width]
img_window = img_window.flatten() / 255.0
windows.append(img_window)
offsets.append((x_offset, y_offset))
return windows, offsets
def _predict(self, windows):
n_windows = len(windows)
windows = np.array(windows)
windows = windows.reshape(n_windows, HEIGHT, WIDTH, 1).astype('float32')
predictions = self.model.predict(windows, verbose=0)
predictions = [pred[1] - pred[0] for pred in predictions]
return predictions
def _calculate_heat_map(self, predictions, offsets):
        heat_map = np.zeros((self.img_h, self.img_w))
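        # (Added sketch) the source is truncated here; a typical continuation, assuming each
        # window's score votes over the region it covers and the map is rescaled to 0-255
        # before the threshold used in process(), might look like:
        # for pred, (x_off, y_off) in zip(predictions, offsets):
        #     heat_map[y_off:y_off + self.height, x_off:x_off + self.width] += max(pred, 0)
        # heat_map = (heat_map / heat_map.max() * 255).astype(np.uint8)
        # return heat_map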
"""
Predict on manifolds: losses.
"""
import numpy as np
import geomstats.lie_group as lie_group
from geomstats.special_orthogonal_group import SpecialOrthogonalGroup
SO3 = SpecialOrthogonalGroup(n=3)
def loss(y_pred, y_true,
metric=SO3.bi_invariant_metric,
representation='vector'):
if representation == 'quaternion':
y_pred = SO3.rotation_vector_from_quaternion(y_pred)
y_true = SO3.rotation_vector_from_quaternion(y_true)
loss = lie_group.loss(y_pred, y_true, SO3, metric)
return loss
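# (Added usage sketch) with the default 'vector' representation both arguments are
# rotation vectors, e.g.
#   loss(np.array([[0., 0., 0.1]]), np.array([[0., 0., 0.3]]))
# which amounts to the squared geodesic distance between the two rotations under the
# bi-invariant metric.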
def grad(y_pred, y_true,
metric=SO3.bi_invariant_metric,
representation='vector'):
y_pred = np.expand_dims(y_pred, axis=0)
y_true = np.expand_dims(y_true, axis=0)
if representation == 'vector':
grad = lie_group.grad(y_pred, y_true, SO3, metric)
if representation == 'quaternion':
        differential = np.zeros((1, 3, 4))
quat_scalar = y_pred[:, :1]
quat_vec = y_pred[:, 1:]
quat_vec_norm = np.linalg.norm(quat_vec, axis=1)
quat_sq_norm = quat_vec_norm ** 2 + quat_scalar ** 2
quat_arctan2 = np.arctan2(quat_vec_norm, quat_scalar)
differential_scalar = - 2 * quat_vec / (quat_sq_norm)
differential_vec = (2 * (quat_scalar / quat_sq_norm
- 2 * quat_arctan2 / quat_vec_norm)
* np.outer(quat_vec, quat_vec) / quat_vec_norm ** 2
+ 2 * quat_arctan2 / quat_vec_norm * np.eye(3))
differential[0, :, :1] = differential_scalar.transpose()
differential[0, :, 1:] = differential_vec
y_pred = SO3.rotation_vector_from_quaternion(y_pred)
y_true = SO3.rotation_vector_from_quaternion(y_true)
grad = lie_group.grad(y_pred, y_true, SO3, metric)
        grad = np.matmul(grad, differential)
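        # (Added note) the source is truncated here; presumably the chain-ruled gradient
        # is what the function ultimately returns, e.g.:
        # return grad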
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from copy import deepcopy
import re
import numpy as np
from .baseline import rescale, _check_baseline, _log_rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .channels.layout import _merge_ch_data, _pair_grad_sensors
from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT
from .filter import detrend, FilterMixin
from .utils import (check_fname, logger, verbose, _time_mask, warn, sizeof_fmt,
SizeMixin, copy_function_doc_to_method_doc, _validate_type,
fill_doc, _check_option, ShiftTimeMixin, _build_data_frame,
_check_pandas_installed, _check_pandas_index_arguments,
_convert_times, _scale_dataframe_data, _check_time_format)
from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field,
plot_evoked_image, plot_evoked_topo)
from .viz.evoked import plot_evoked_white, plot_evoked_joint
from .viz.topomap import _topomap_animation
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tag import read_tag
from .io.tree import dir_tree_find
from .io.pick import pick_types, _picks_to_idx
from .io.meas_info import read_meas_info, write_meas_info
from .io.proj import ProjMixin
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_string, write_float_matrix,
write_id, write_float, write_complex_float_matrix)
from .io.base import TimeMixin, _check_maxshield
_aspect_dict = {
'average': FIFF.FIFFV_ASPECT_AVERAGE,
'standard_error': FIFF.FIFFV_ASPECT_STD_ERR,
'single_epoch': FIFF.FIFFV_ASPECT_SINGLE,
'partial_average': FIFF.FIFFV_ASPECT_SUBAVERAGE,
'alternating_subaverage': FIFF.FIFFV_ASPECT_ALTAVERAGE,
'sample_cut_out_by_graph': FIFF.FIFFV_ASPECT_SAMPLE,
'power_density_spectrum': FIFF.FIFFV_ASPECT_POWER_DENSITY,
'dipole_amplitude_cuvre': FIFF.FIFFV_ASPECT_DIPOLE_WAVE,
'squid_modulation_lower_bound': FIFF.FIFFV_ASPECT_IFII_LOW,
'squid_modulation_upper_bound': FIFF.FIFFV_ASPECT_IFII_HIGH,
'squid_gate_setting': FIFF.FIFFV_ASPECT_GATE,
}
_aspect_rev = {val: key for key, val in _aspect_dict.items()}
@fill_doc
class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin,
InterpolationMixin, FilterMixin, TimeMixin, SizeMixin,
ShiftTimeMixin):
"""Evoked data.
Parameters
----------
fname : str
Name of evoked/average FIF file to load.
If None no data is loaded.
condition : int, or str
Dataset ID number (int) or comment/name (str). Optional if there is
only one data set in file.
proj : bool, optional
Apply SSP projection vectors.
kind : str
Either 'average' or 'standard_error'. The type of data to read.
Only used if 'condition' is a str.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(verbose)s
Attributes
----------
info : dict
Measurement info.
ch_names : list of str
List of channels' names.
nave : int
Number of averaged epochs.
kind : str
Type of data, either average or standard_error.
comment : str
Comment on dataset. Can be the condition.
data : array of shape (n_channels, n_times)
Evoked response.
first : int
First time sample.
last : int
Last time sample.
tmin : float
The first time point in seconds.
tmax : float
The last time point in seconds.
times : array
Time vector in seconds. The time interval between consecutive time
samples is equal to the inverse of the sampling frequency.
baseline : None | tuple of length 2
This attribute reflects whether the data has been baseline-corrected
(it will be a ``tuple``) or not (it will be ``None``).
%(verbose)s
Notes
-----
Evoked objects contain a single condition only.
"""
@verbose
def __init__(self, fname, condition=None, proj=True,
kind='average', allow_maxshield=False,
verbose=None): # noqa: D102
_validate_type(proj, bool, "'proj'")
# Read the requested data
self.info, self.nave, self._aspect_kind, self.comment, self.times, \
self.data, self.baseline = _read_evoked(fname, condition, kind,
allow_maxshield)
self._update_first_last()
self.verbose = verbose
self.preload = True
if proj:
self.apply_proj()
@property
def kind(self):
"""The data kind."""
return _aspect_rev[self._aspect_kind]
@kind.setter
def kind(self, kind):
_check_option('kind', kind, list(_aspect_dict.keys()))
self._aspect_kind = _aspect_dict[kind]
@property
def data(self):
"""The data matrix."""
return self._data
@data.setter
def data(self, data):
"""Set the data matrix."""
self._data = data
@verbose
def apply_baseline(self, baseline=(None, 0), verbose=None):
"""Baseline correct evoked data.
Parameters
----------
%(baseline_evoked)s
%(verbose_meth)s
Returns
-------
evoked : instance of Evoked
The baseline-corrected Evoked object.
Notes
-----
.. versionadded:: 0.13.0
"""
_check_baseline(baseline, self.times[0], self.times[-1],
self.info['sfreq'])
if self.baseline is not None and baseline is None:
raise ValueError('The data has already been baseline-corrected. '
                             'Cannot remove existing baseline correction.')
elif baseline is None:
# Do not rescale
logger.info(_log_rescale(None))
else:
            # Actually baseline-correct the data. Logging happens in rescale().
self.data = rescale(self.data, self.times, baseline, copy=False)
self.baseline = baseline
return self
def save(self, fname):
"""Save dataset to file.
Parameters
----------
fname : str
The name of the file, which should end with -ave.fif or
-ave.fif.gz.
Notes
-----
To write multiple conditions into a single file, use
:func:`mne.write_evokeds`.
.. versionchanged:: 0.21
Information on baseline correction will be stored with the dataset,
and will be restored when reading the data again via
`~mne.read_evokeds`.
"""
write_evokeds(fname, self)
def __repr__(self): # noqa: D105
s = "'%s' (%s, N=%s)" % (self.comment, self.kind, self.nave)
s += ", [%0.5g, %0.5g] sec" % (self.times[0], self.times[-1])
s += ", %s ch" % self.data.shape[0]
s += ", ~%s" % (sizeof_fmt(self._size),)
return "<Evoked | %s>" % s
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@property
def tmin(self):
"""First time point.
.. versionadded:: 0.21
"""
return self.times[0]
@property
def tmax(self):
"""Last time point.
.. versionadded:: 0.21
"""
return self.times[-1]
@verbose
def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
%(verbose_meth)s
Returns
-------
evoked : instance of Evoked
The cropped Evoked object, modified in-place.
Notes
-----
%(notes_tmax_included_by_default)s
"""
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn('tmin is not in Evoked time interval. tmin is set to '
'Evoked.tmin')
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn('tmax is not in Evoked time interval. tmax is set to '
'Evoked.tmax')
tmax = self.tmax
mask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'],
include_tmax=include_tmax)
self.times = self.times[mask]
self._update_first_last()
self.data = self.data[:, mask]
try:
_check_baseline(self.baseline, tmin, tmax, self.info['sfreq'])
except ValueError as err:
err_msg = str(err)
acceptable_msgs = (
'Baseline interval is only one sample',
'Baseline interval (tmin = .*) is outside of data range',
'Baseline interval (tmax = .*) is outside of data range',
'Baseline min (.*) must be less than baseline max'
)
if any([re.match(regexp, err_msg) for regexp in acceptable_msgs]):
# The baseline period no longer applies, so wipe it out.
warn('Cropping removes baseline period, setting baseline=None')
self.baseline = None
else:
# Something unexpected happened.
raise err
return self
@verbose
def decimate(self, decim, offset=0, verbose=None):
"""Decimate the evoked data.
Parameters
----------
%(decim)s
%(decim_offset)s
%(verbose_meth)s
Returns
-------
evoked : instance of Evoked
The decimated Evoked object.
See Also
--------
Epochs.decimate
Epochs.resample
mne.io.Raw.resample
Notes
-----
%(decim_notes)s
.. versionadded:: 0.13.0
"""
decim, offset, new_sfreq = _check_decim(self.info, decim, offset)
start_idx = int(round(self.times[0] * (self.info['sfreq'] * decim)))
i_start = start_idx % decim + offset
decim_slice = slice(i_start, None, decim)
self.info['sfreq'] = new_sfreq
self.data = self.data[:, decim_slice].copy()
self.times = self.times[decim_slice].copy()
self._update_first_last()
return self
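    # Illustrative usage sketch (comments only, so module behaviour is
    # unchanged; the file name and sampling rate are assumptions):
    #
    #     evoked = read_evokeds('sample-ave.fif', condition=0)  # sfreq 600 Hz
    #     evoked.decimate(4)      # keep every 4th sample
    #     evoked.info['sfreq']    # -> 150.0
    #
    # Decimation does not low-pass filter the data; filter first to avoid
    # aliasing (see the warning emitted by _check_decim below).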
@copy_function_doc_to_method_doc(plot_evoked)
def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
xlim='tight', proj=False, hline=None, units=None, scalings=None,
titles=None, axes=None, gfp=False, window_title=None,
spatial_colors=False, zorder='unsorted', selectable=True,
noise_cov=None, time_unit='s', sphere=None, verbose=None):
return plot_evoked(
self, picks=picks, exclude=exclude, unit=unit, show=show,
ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
scalings=scalings, titles=titles, axes=axes, gfp=gfp,
window_title=window_title, spatial_colors=spatial_colors,
zorder=zorder, selectable=selectable, noise_cov=noise_cov,
time_unit=time_unit, sphere=sphere, verbose=verbose)
@copy_function_doc_to_method_doc(plot_evoked_image)
def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
clim=None, xlim='tight', proj=False, units=None,
scalings=None, titles=None, axes=None, cmap='RdBu_r',
colorbar=True, mask=None, mask_style=None,
mask_cmap='Greys', mask_alpha=.25, time_unit='s',
show_names=None, group_by=None, sphere=None):
return plot_evoked_image(
self, picks=picks, exclude=exclude, unit=unit, show=show,
clim=clim, xlim=xlim, proj=proj, units=units, scalings=scalings,
titles=titles, axes=axes, cmap=cmap, colorbar=colorbar, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap, mask_alpha=mask_alpha,
time_unit=time_unit, show_names=show_names, group_by=group_by,
sphere=sphere)
@copy_function_doc_to_method_doc(plot_evoked_topo)
def plot_topo(self, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_background=None,
merge_grads=False, legend=True, axes=None,
background_color='w', noise_cov=None, show=True):
"""
Notes
-----
.. versionadded:: 0.10.0
"""
return plot_evoked_topo(
self, layout=layout, layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings, title=title,
proj=proj, vline=vline, fig_background=fig_background,
merge_grads=merge_grads, legend=legend, axes=axes,
background_color=background_color, noise_cov=noise_cov, show=show)
@copy_function_doc_to_method_doc(plot_evoked_topomap)
def plot_topomap(self, times="auto", ch_type=None, vmin=None,
vmax=None, cmap=None, sensors=True, colorbar=True,
scalings=None, units=None, res=64,
size=1, cbar_fmt="%3.1f",
time_unit='s', time_format=None,
proj=False, show=True, show_names=False, title=None,
mask=None, mask_params=None, outlines='head',
contours=6, image_interp='bilinear', average=None,
axes=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None,
border=_BORDER_DEFAULT, nrows=1, ncols='auto'):
return plot_evoked_topomap(
self, times=times, ch_type=ch_type, vmin=vmin,
vmax=vmax, cmap=cmap, sensors=sensors, colorbar=colorbar,
scalings=scalings, units=units, res=res,
size=size, cbar_fmt=cbar_fmt, time_unit=time_unit,
time_format=time_format, proj=proj, show=show,
show_names=show_names, title=title, mask=mask,
mask_params=mask_params, outlines=outlines, contours=contours,
image_interp=image_interp, average=average,
axes=axes, extrapolate=extrapolate, sphere=sphere, border=border,
nrows=nrows, ncols=ncols)
@copy_function_doc_to_method_doc(plot_evoked_field)
def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1, fig=None, vmax=None, n_contours=21, verbose=None):
return plot_evoked_field(self, surf_maps, time=time,
time_label=time_label, n_jobs=n_jobs,
fig=fig, vmax=vmax, n_contours=n_contours,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_evoked_white)
def plot_white(self, noise_cov, show=True, rank=None, time_unit='s',
sphere=None, axes=None, verbose=None):
return plot_evoked_white(
self, noise_cov=noise_cov, rank=rank, show=show,
time_unit=time_unit, sphere=sphere, axes=axes, verbose=verbose)
@copy_function_doc_to_method_doc(plot_evoked_joint)
def plot_joint(self, times="peaks", title='', picks=None,
exclude='bads', show=True, ts_args=None,
topomap_args=None):
return plot_evoked_joint(self, times=times, title=title, picks=picks,
exclude=exclude, show=show, ts_args=ts_args,
topomap_args=topomap_args)
@fill_doc
def animate_topomap(self, ch_type=None, times=None, frame_rate=None,
butterfly=False, blit=True, show=True, time_unit='s',
sphere=None):
"""Make animation of evoked data as topomap timeseries.
The animation can be paused/resumed with left mouse button.
Left and right arrow keys can be used to move backward or forward
in time.
Parameters
----------
ch_type : str | None
Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg',
            'hbo', 'hbr', 'fnirs_od', and 'fnirs_cw_amplitude'.
If None, first available channel type from ('mag', 'grad', 'eeg',
            'hbo', 'hbr', 'fnirs_od', 'fnirs_cw_amplitude') is used.
Defaults to None.
times : array of float | None
The time points to plot. If None, 10 evenly spaced samples are
calculated over the evoked time series. Defaults to None.
frame_rate : int | None
Frame rate for the animation in Hz. If None,
frame rate = sfreq / 10. Defaults to None.
butterfly : bool
Whether to plot the data as butterfly plot under the topomap.
Defaults to False.
blit : bool
Whether to use blit to optimize drawing. In general, it is
recommended to use blit in combination with ``show=True``. If you
intend to save the animation it is better to disable blit.
Defaults to True.
show : bool
Whether to show the animation. Defaults to True.
time_unit : str
The units for the time axis, can be "ms" (default in 0.16)
or "s" (will become the default in 0.17).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
anim : instance of matplotlib.animation.FuncAnimation
Animation of the topomap.
Notes
-----
.. versionadded:: 0.12.0
"""
return _topomap_animation(
self, ch_type=ch_type, times=times, frame_rate=frame_rate,
butterfly=butterfly, blit=blit, show=show, time_unit=time_unit,
sphere=sphere)
def as_type(self, ch_type='grad', mode='fast'):
"""Compute virtual evoked using interpolated fields.
.. Warning:: Using virtual evoked to compute inverse can yield
unexpected results. The virtual channels have ``'_v'`` appended
at the end of the names to emphasize that the data contained in
them are interpolated.
Parameters
----------
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used. ``'fast'`` should be sufficient
for most applications.
Returns
-------
evoked : instance of mne.Evoked
The transformed evoked object containing only virtual channels.
Notes
-----
This method returns a copy and does not modify the data it
operates on. It also returns an EvokedArray instance.
.. versionadded:: 0.9.0
"""
from .forward import _as_meg_type_inst
return _as_meg_type_inst(self, ch_type=ch_type, mode=mode)
@fill_doc
def detrend(self, order=1, picks=None):
"""Detrend data.
This function operates in-place.
Parameters
----------
order : int
Either 0 or 1, the order of the detrending. 0 is a constant
(DC) detrend, 1 is a linear detrend.
%(picks_good_data)s
Returns
-------
evoked : instance of Evoked
The detrended evoked object.
"""
picks = _picks_to_idx(self.info, picks)
self.data[picks] = detrend(self.data[picks], order, axis=-1)
return self
def copy(self):
"""Copy the instance of evoked.
Returns
-------
evoked : instance of Evoked
A copy of the object.
"""
evoked = deepcopy(self)
return evoked
def __neg__(self):
"""Negate channel responses.
Returns
-------
evoked_neg : instance of Evoked
The Evoked instance with channel data negated and '-'
prepended to the comment.
"""
out = self.copy()
out.data *= -1
out.comment = '-' + (out.comment or 'unknown')
return out
def get_peak(self, ch_type=None, tmin=None, tmax=None,
mode='abs', time_as_index=False, merge_grads=False,
return_amplitude=False):
"""Get location and latency of peak amplitude.
Parameters
----------
        ch_type : 'mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr', 'misc', None
            The channel type to use. Defaults to None. If more than one sensor
            type is present in the data, the channel type has to be explicitly
set.
tmin : float | None
The minimum point in time to be considered for peak getting.
If None (default), the beginning of the data is used.
tmax : float | None
The maximum point in time to be considered for peak getting.
If None (default), the end of the data is used.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
time_as_index : bool
Whether to return the time index instead of the latency in seconds.
merge_grads : bool
If True, compute peak from merged gradiometer data.
return_amplitude : bool
If True, return also the amplitude at the maximum response.
.. versionadded:: 0.16
Returns
-------
ch_name : str
The channel exhibiting the maximum response.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
amplitude : float
The amplitude of the maximum response. Only returned if
return_amplitude is True.
.. versionadded:: 0.16
""" # noqa: E501
supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', 'hbo',
'hbr', 'None', 'fnirs_cw_amplitude', 'fnirs_od')
types_used = self.get_channel_types(unique=True, only_data_chs=True)
_check_option('ch_type', str(ch_type), supported)
if ch_type is not None and ch_type not in types_used:
raise ValueError('Channel type `{ch_type}` not found in this '
'evoked object.'.format(ch_type=ch_type))
elif len(types_used) > 1 and ch_type is None:
raise RuntimeError('More than one sensor type found. `ch_type` '
'must not be `None`, pass a sensor type '
'value instead')
if merge_grads:
if ch_type != 'grad':
raise ValueError('Channel type must be grad for merge_grads')
elif mode == 'neg':
raise ValueError('Negative mode (mode=neg) does not make '
'sense with merge_grads=True')
meg = eeg = misc = seeg = ecog = fnirs = False
picks = None
if ch_type in ('mag', 'grad'):
meg = ch_type
elif ch_type == 'eeg':
eeg = True
elif ch_type == 'misc':
misc = True
elif ch_type == 'seeg':
seeg = True
elif ch_type == 'ecog':
ecog = True
elif ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'):
fnirs = ch_type
if ch_type is not None:
if merge_grads:
picks = _pair_grad_sensors(self.info, topomap_coords=False)
else:
picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc,
seeg=seeg, ecog=ecog, ref_meg=False,
fnirs=fnirs)
data = self.data
ch_names = self.ch_names
if picks is not None:
data = data[picks]
ch_names = [ch_names[k] for k in picks]
if merge_grads:
data, _ = _merge_ch_data(data, ch_type, [])
ch_names = [ch_name[:-1] + 'X' for ch_name in ch_names[::2]]
ch_idx, time_idx, max_amp = _get_peak(data, self.times, tmin,
tmax, mode)
out = (ch_names[ch_idx], time_idx if time_as_index else
self.times[time_idx])
if return_amplitude:
out += (max_amp,)
return out
@fill_doc
def to_data_frame(self, picks=None, index=None,
scalings=None, copy=True, long_format=False,
time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Channels are converted to columns in the DataFrame. By default,
an additional column "time" is added, unless ``index='time'``
(in which case time values form the DataFrame's index).
Parameters
----------
%(picks_all)s
%(df_index_evk)s
Defaults to ``None``.
%(df_scalings)s
%(df_copy)s
%(df_longform_raw)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
picks = _picks_to_idx(self.info, picks, 'all', exclude=())
data = self.data[picks, :]
times = self.times
data = data.T
if copy:
data = data.copy()
data = _scale_dataframe_data(self, data, picks, scalings)
# prepare extra columns / multiindex
mindex = list()
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
# build DataFrame
df = _build_data_frame(self, data, picks, long_format, mindex, index,
default_index=['time'])
return df
def _check_decim(info, decim, offset):
"""Check decimation parameters."""
if decim < 1 or decim != int(decim):
raise ValueError('decim must be an integer > 0')
decim = int(decim)
new_sfreq = info['sfreq'] / float(decim)
lowpass = info['lowpass']
if decim > 1 and lowpass is None:
warn('The measurement information indicates data is not low-pass '
'filtered. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (decim, new_sfreq))
elif decim > 1 and new_sfreq < 3 * lowpass:
warn('The measurement information indicates a low-pass frequency '
'of %g Hz. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (lowpass, decim, new_sfreq)) # > 50% nyquist lim
offset = int(offset)
if not 0 <= offset < decim:
        raise ValueError('offset must be at least 0 and less than decim '
                         '(%s), got %s' % (decim, offset))
return decim, offset, new_sfreq
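# Illustrative sketch (comments only; the numbers are assumptions, not taken
# from this module): with info['sfreq'] == 1000. and info['lowpass'] == 40.,
#     _check_decim(info, decim=5, offset=2)  ->  (5, 2, 200.0)
# and no warning is raised because 200 Hz >= 3 * 40 Hz.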
@fill_doc
class EvokedArray(Evoked):
"""Evoked object from numpy array.
Parameters
----------
data : array of shape (n_channels, n_times)
The channels' evoked response. See notes for proper units of measure.
info : instance of Info
Info dictionary. Consider using ``create_info`` to populate
this structure.
tmin : float
Start time before event. Defaults to 0.
comment : str
Comment on dataset. Can be the condition. Defaults to ''.
nave : int
Number of averaged epochs. Defaults to 1.
kind : str
Type of data, either average or standard_error. Defaults to 'average'.
%(verbose)s
%(baseline_array)s
Defaults to ``None``, i.e. no baseline correction.
.. versionadded:: 0.21
See Also
--------
EpochsArray, io.RawArray, create_info
Notes
-----
Proper units of measure:
* V: eeg, eog, seeg, emg, ecg, bio, ecog
* T: mag
* T/m: grad
* M: hbo, hbr
* Am: dipole
* AU: misc
"""
@verbose
def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average',
verbose=None, baseline=None): # noqa: D102
dtype = np.complex128 if np.iscomplexobj(data) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 2:
raise ValueError('Data must be a 2D array of shape (n_channels, '
'n_samples), got shape %s' % (data.shape,))
if len(info['ch_names']) != np.shape(data)[0]:
raise ValueError('Info (%s) and data (%s) must have same number '
                             'of channels.' % (len(info['ch_names']),
                                               np.shape(data)[0]))
import numpy as np
from operator import itemgetter
from ldds.base import generate_points, lagrangian_descriptor
def check_if_points_escape_box(u, box_boundaries):
"""
Determine if points u in 2D plane have escaped from box_boundaries limits.
Parameters
----------
u : ndarray, shape(n, 2),
Points in plane.
box_boundaries : list of 2 tuples of floats,
Values are interpreted as [[x_min,x_max], [y_min, y_max]].
Returns
-------
u_indices : ndarray of bools, shape(n, 2),
True/False for points inside/outside the box_boundaries respectively.
"""
x, y = u.T
# Escape condition
box_x_min, box_x_max = box_boundaries[0]
box_y_min, box_y_max = box_boundaries[1]
u_indices = (x >= box_x_min) & (x <= box_x_max) & (y >= box_y_min) & (y <= box_y_max)
return u_indices
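# Minimal usage sketch (comments only; the values are illustrative):
#     u = np.array([[0.5, 0.5], [2.0, 0.5]])
#     check_if_points_escape_box(u, [[0, 1], [0, 1]])  ->  array([ True, False])
# Only the first point lies inside both ranges, so only it is flagged True.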
def pbc_correction_coords_single_axis(x, box_origin, box_length):
"""
Correct single coordinate on a periodic domain.
Parameters
----------
x : ndarray, shape(n,)
Coordinate values.
box_origin : float or False(bool),
Values of perdiodic domain origin. If False, no correction is applied.
box_length : float or False(bool)
Length of periodic doamin. If False, no correction is applied
Returns
-------
x_pbc : ndarray, shape(n,)
Corrected coordinate values.
"""
x_pbc = x
x0 = box_origin
L = box_length
if not isinstance(x0, bool) or not isinstance(L, bool):
#apply PBC correction
x = x + L/2 - x0
        x = np.mod(x + 2*L, L)
        # shift back to the original offset (assumed inverse of the shift above)
        x_pbc = x - L/2 + x0
    return x_pbc
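# Minimal sketch of the wrap-around behaviour (comments only; values assumed):
#     pbc_correction_coords_single_axis(np.array([3.5]), 0.0, 2*np.pi)
# maps 3.5 into the periodic interval centred on the origin, i.e. the same
# angle expressed within [-pi, pi).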
from detectron2.utils.logger import setup_logger
setup_logger()
import cv2, os, re
import numpy as np
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from densepose.config import add_densepose_config
from densepose.vis.base import CompoundVisualizer
from densepose.vis.densepose_results import DensePoseResultsFineSegmentationVisualizer, DensePoseResultsVisualizer
from densepose.vis.densepose_data_points import DensePoseDataCoarseSegmentationVisualizer
from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
from densepose.vis.extractor import CompoundExtractor, DensePoseResultExtractor, create_extractor
from densepose.vis.extractor import extract_boxes_xywh_from_instances
from densepose.converters import ToChartResultConverterWithConfidences
from densepose.vis.base import MatrixVisualizer
import torch
import collections
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy.ndimage.interpolation import rotate
from scipy.spatial import ConvexHull
import pandas as pd
from skimage import morphology
# window setting
window_segm = 'segm'
window_bbox = 'bbox'
window_norm = 'norm'
window_dilation = 'dilation'
window_stitched_data = 'stitched data'
# setting
gray_val_scale = 10.625
cmap = cv2.COLORMAP_PARULA
# files of config
densepose_keypoints_dir = os.path.join('output', 'segments')
openpose_keypoints_dir = os.path.join('output', 'data')
norm_segm_dir = os.path.join('output', 'pix')
fname_vitruve_norm = os.path.join('pix', 'vitruve_norm.png')
# data type
# keypoints = {key: (x, y, score)}
# pixel = (x, y)
# segments_xy = [(x1, y1), (x2, y2), ...]
# segm = [[x1, y1]=(b,g,r), [x2, y2]=(b,g,r), ...] -> 2D np.ndarray
# coarse segmentation:
# 0 = Background
# 1 = Torso,
# 2 = Right Hand, 3 = Left Hand, 4 = Left Foot, 5 = Right Foot,
# 6 = Upper Leg Right, 7 = Upper Leg Left, 8 = Lower Leg Right, 9 = Lower Leg Left,
# 10 = Upper Arm Left, 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right,
# 14 = Head
COARSE_ID = [
'Background',
'Torso',
'RHand', 'LHand', 'LFoot', 'RFoot',
'RThigh', 'LThigh', 'RCalf', 'LCalf',
'LUpperArm', 'RUpperArm', 'LLowerArm', 'RLowerArm',
'Head'
]
# implicit cmap = cv2.COLORMAP_PARULA <= hard-coded!!! ugh!!!
# BGRA -> alpha channel: 0 = transparent, 255 = non-transparent
COARSE_TO_COLOR = {
'Background': [255, 255, 255, 255],
'Torso': [191, 78, 22, 255],
'RThigh': [167, 181, 44, 255],
'LThigh': [141, 187, 91, 255],
'RCalf': [114, 191, 147, 255],
'LCalf': [96, 188, 192, 255],
'LUpperArm': [87, 207, 112, 255],
'RUpperArm': [55, 218, 162, 255],
'LLowerArm': [25, 226, 216, 255],
'RLowerArm': [37, 231, 253, 255],
'Head': [14, 251, 249, 255]
}
# fine segmentation:
# 0 = Background
# 1, 2 = Torso,
# 3 = Right Hand, 4 = Left Hand, 5 = Left Foot, 6 = Right Foot,
# 7, 9 = Upper Leg Right, 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right, 12, 14 = Lower Leg Left,
# 15, 17 = Upper Arm Left, 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left, 20, 22 = Lower Arm Right,
# 23, 24 = Head
FINE_TO_COARSE_SEGMENTATION = {
1: 1,
2: 1,
3: 2,
4: 3,
5: 4,
6: 5,
7: 6,
8: 7,
9: 6,
10: 7,
11: 8,
12: 9,
13: 8,
14: 9,
15: 10,
16: 11,
17: 10,
18: 11,
19: 12,
20: 13,
21: 12,
22: 13,
23: 14,
24: 14
}
# Body 25 Keypoints
JOINT_ID = [
'Nose', 'Neck',
'RShoulder', 'RElbow', 'RWrist', 'LShoulder', 'LElbow', 'LWrist',
'MidHip',
'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle',
'REye', 'LEye', 'REar', 'LEar',
'LBigToe', 'LSmallToe', 'LHeel', 'RBigToe', 'RSmallToe', 'RHeel',
'Background'
]
def _extract_i_from_iuvarr(iuv_arr):
return iuv_arr[0, :, :]
def _extract_u_from_iuvarr(iuv_arr):
return iuv_arr[1, :, :]
def _extract_v_from_iuvarr(iuv_arr):
return iuv_arr[2, :, :]
def extract_segm(result_densepose, is_coarse=True):
iuv_array = torch.cat(
(result_densepose.labels[None].type(torch.float32), result_densepose.uv * 255.0)
).type(torch.uint8)
iuv_array = iuv_array.cpu().numpy()
segm = _extract_i_from_iuvarr(iuv_array)
if is_coarse:
for fine_idx, coarse_idx in FINE_TO_COARSE_SEGMENTATION.items():
segm[segm == fine_idx] = coarse_idx
mask = np.zeros(segm.shape, dtype=np.uint8)
mask[segm > 0] = 1
# matrix = _extract_v_from_iuvarr(iuv_array)
return mask, segm
def _resize(mask, segm, w, h):
    interp_method_mask = cv2.INTER_NEAREST
    interp_method_segm = cv2.INTER_LINEAR
    if (w != mask.shape[1]) or (h != mask.shape[0]):
        mask = cv2.resize(mask, (w, h), interpolation=interp_method_mask)
    if (w != segm.shape[1]) or (h != segm.shape[0]):
        segm = cv2.resize(segm, (w, h), interpolation=interp_method_segm)
return mask, segm
def _calc_angle(point1, center, point2):
try:
a = np.array(point1)[0:2] - np.array(center)[0:2]
b = np.array(point2)[0:2] - np.array(center)[0:2]
cos_theta = np.dot(a, b)
sin_theta = np.cross(a, b)
rad = np.arctan2(sin_theta, cos_theta)
deg = np.rad2deg(rad)
if np.isnan(rad):
return 0, 0
return rad, deg
except:
return 0, 0
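# Quick sketch (comments only; the points are made up): the signed angle swept
# from point1 to point2 around center, e.g.
#     _calc_angle((1, 0), (0, 0), (0, 1))  ->  (pi/2, 90.0)
# The sign comes from the 2D cross product, so clockwise sweeps are negative.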
def _rotate(point, center, rad):
# print(point)
x = ((point[0] - center[0]) * np.cos(rad)) - ((point[1] - center[1]) * np.sin(rad)) + center[0]
y = ((point[0] - center[0]) * np.sin(rad)) + ((point[1] - center[1]) * np.cos(rad)) + center[1]
if len(point) == 3:
return [int(x), int(y), point[2]] # for keypoints with score
elif len(point) == 2:
return (int(x), int(y)) # for segments (x, y) without score
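# Quick sketch (comments only): rotating (2, 0) by pi/2 about the origin gives
#     _rotate((2, 0), (0, 0), np.pi / 2)  ->  (0, 2)
# Keypoints keep their third (score) entry; plain (x, y) segments do not.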
def _segm_xy(segm, segm_id_list, is_equal=True):
if len(segm_id_list) == 1:
segm_id = segm_id_list[0]
if is_equal:
y, x = np.where(segm == segm_id)
else:
y, x = np.where(segm != segm_id)
elif len(segm_id_list) > 1:
if is_equal:
cond = []
for segm_id in segm_id_list:
cond.append(segm == segm_id)
y, x = np.where(np.logical_or.reduce(tuple(cond)))
else:
cond = []
for segm_id in segm_id_list:
cond.append(segm != segm_id)
            y, x = np.where(np.logical_and.reduce(tuple(cond)))  # exclude every listed id
return list(zip(x, y))
def _segments_xy_centroid(segments_xy):
x = [segment_xy[0] for segment_xy in segments_xy if not np.isnan(segment_xy[0])]
y = [segment_xy[1] for segment_xy in segments_xy if not np.isnan(segment_xy[1])]
    centroid = (sum(x) / len(x), sum(y) / len(y))
return centroid
def _keypoints_midpoint(keypoint1, keypoint2):
return ((np.array(keypoint1) + np.array(keypoint2)) / 2).astype(int)
def is_valid(keypoints):
# check the scores for each main keypoint, which MUST exist!
# main_keypoints = BODY BOX
main_keypoints = ['Nose', 'Neck', 'RShoulder', 'LShoulder', 'RHip', 'LHip', 'MidHip']
keypoints = dict(zip(JOINT_ID, keypoints))
# filter the main keypoints by score > 0
filtered_keypoints = [key for key, value in keypoints.items() if key in main_keypoints and value[2] > 0]
print('Number of valid keypoints (must be equal to 7):', len(filtered_keypoints))
if len(filtered_keypoints) != 7:
return False
else:
return True
def _get_segments_xy(segm, keypoints):
segments_xy = []
bg_xy = [] # 0
segments_xy.append(bg_xy)
torso_xy = _segm_xy(segm=segm, segm_id_list=[1])
segments_xy.append(torso_xy)
r_hand_xy = [] # 2
l_hand_xy = [] # 3
l_foot_xy = [] # 4
r_foot_xy = [] # 5
segments_xy.append(r_hand_xy)
segments_xy.append(l_hand_xy)
segments_xy.append(l_foot_xy)
segments_xy.append(r_foot_xy)
r_thigh_xy = _segm_xy(segm=segm, segm_id_list=[6])
l_thigh_xy = _segm_xy(segm=segm, segm_id_list=[7])
r_calf_xy = _segm_xy(segm=segm, segm_id_list=[8])
l_calf_xy = _segm_xy(segm=segm, segm_id_list=[9])
segments_xy.append(r_thigh_xy)
segments_xy.append(l_thigh_xy)
segments_xy.append(r_calf_xy)
segments_xy.append(l_calf_xy)
l_upper_arm_xy = _segm_xy(segm=segm, segm_id_list=[10])
r_upper_arm_xy = _segm_xy(segm=segm, segm_id_list=[11])
l_lower_arm_xy = _segm_xy(segm=segm, segm_id_list=[12])
r_lower_arm_xy = _segm_xy(segm=segm, segm_id_list=[13])
segments_xy.append(l_upper_arm_xy)
segments_xy.append(r_upper_arm_xy)
segments_xy.append(l_lower_arm_xy)
segments_xy.append(r_lower_arm_xy)
head_xy = _segm_xy(segm=segm, segm_id_list=[14])
segments_xy.append(head_xy)
# valid segments with keypoints
dict_segments_xy = dict(zip(COARSE_ID, segments_xy))
segments_xy = {}
# head
if len(dict_segments_xy['Head']) > 0 and keypoints['Nose'][2] > 0:
segments_xy['Head'] = {'segm_xy': dict_segments_xy['Head'],
'keypoints':
{'Nose': keypoints['Nose']}
}
# torso
if len(dict_segments_xy['Torso']) > 0:
segments_xy['Torso'] = {'segm_xy': dict_segments_xy['Torso'],
'keypoints':
{'Neck': keypoints['Neck'],
'RShoulder': keypoints['RShoulder'],
'LShoulder': keypoints['LShoulder'],
'MidHip': keypoints['MidHip'],
'RHip': keypoints['RHip'],
'LHip': keypoints['LHip']}
}
# lower limbs
if len(dict_segments_xy['RThigh']) > 0 and 'RKnee' in keypoints and keypoints['RKnee'][2] > 0:
segments_xy['RThigh'] = {'segm_xy': dict_segments_xy['RThigh'],
'keypoints':
{'RKnee': keypoints['RKnee']}
}
if len(dict_segments_xy['LThigh']) > 0 and 'LKnee' in keypoints and keypoints['LKnee'][2] > 0:
segments_xy['LThigh'] = {'segm_xy': dict_segments_xy['LThigh'],
'keypoints':
{'LKnee': keypoints['LKnee']}
}
if len(dict_segments_xy['RCalf']) > 0 and 'RAnkle' in keypoints and keypoints['RAnkle'][2] > 0:
segments_xy['RCalf'] = {'segm_xy': dict_segments_xy['RCalf'],
'keypoints':
{'RAnkle': keypoints['RAnkle']}
}
if len(dict_segments_xy['LCalf']) > 0 and 'LAnkle' in keypoints and keypoints['LAnkle'][2] > 0:
segments_xy['LCalf'] = {'segm_xy': dict_segments_xy['LCalf'],
'keypoints':
{'LAnkle': keypoints['LAnkle']}
}
# upper limbs
if len(dict_segments_xy['RUpperArm']) > 0 and 'RElbow' in keypoints and keypoints['RElbow'][2] > 0:
segments_xy['RUpperArm'] = {'segm_xy': dict_segments_xy['RUpperArm'],
'keypoints':
{'RElbow': keypoints['RElbow']}
}
if len(dict_segments_xy['LUpperArm']) > 0 and 'LElbow' in keypoints and keypoints['LElbow'][2] > 0:
segments_xy['LUpperArm'] = {'segm_xy': dict_segments_xy['LUpperArm'],
'keypoints':
{'LElbow': keypoints['LElbow']}
}
if len(dict_segments_xy['RLowerArm']) > 0 and 'RWrist' in keypoints and keypoints['RWrist'][2] > 0:
segments_xy['RLowerArm'] = {'segm_xy': dict_segments_xy['RLowerArm'],
'keypoints':
{'RWrist': keypoints['RWrist']}
}
if len(dict_segments_xy['LLowerArm']) > 0 and 'LWrist' in keypoints and keypoints['LWrist'][2] > 0:
segments_xy['LLowerArm'] = {'segm_xy': dict_segments_xy['LLowerArm'],
'keypoints':
{'LWrist': keypoints['LWrist']}
}
return segments_xy
def _rotate_to_vertical_pose(segments_xy):
midhip_keypoint = segments_xy['Torso']['keypoints']['MidHip']
neck_keypoint = segments_xy['Torso']['keypoints']['Neck']
# calculate the angle for rotation to vertical pose
reference_point = np.array(midhip_keypoint) + np.array((0, -100, 0))
rad, deg = _calc_angle(point1=neck_keypoint, center=midhip_keypoint, point2=reference_point)
for segment_id, segment in segments_xy.items():
segments_xy[segment_id]['segm_xy'] = np.array([_rotate((x, y), midhip_keypoint, rad) for (x, y) in segment['segm_xy']])
for keypoints_id, keypoints in segment['keypoints'].items():
segments_xy[segment_id]['keypoints'][keypoints_id] = _rotate(keypoints, midhip_keypoint, rad)
return segments_xy
def _rotate_head_around_centroid(segm_xy, keypoint1_ref, keypoint2_ref):
# midpoint of vertical line and horizontal line
centroid = _segments_xy_centroid(segm_xy)
rad, deg = _calc_angle(centroid, keypoint1_ref, keypoint2_ref)
rad += np.pi
segm_xy = np.array([_rotate([x, y], keypoint1_ref, rad) for (x, y) in segm_xy])
keypoint = _rotate(centroid, keypoint1_ref, rad)
return segm_xy, keypoint
def _rotate_limbs_around_midpoint(segm_xy, keypoint, ref_keypoint, is_right, is_leg):
# mid-keypoint
midpoint = _keypoints_midpoint(keypoint1=keypoint, keypoint2=ref_keypoint)
# rotate to horizontal
ref_midpoint = midpoint + np.array([50, 0, 0])
if is_right:
rad, deg = _calc_angle(ref_keypoint, midpoint, ref_midpoint)
if is_leg:
rad -= np.pi/2
else:
rad, deg = _calc_angle(keypoint, midpoint, ref_midpoint)
if is_leg:
rad += np.pi / 2
segm_xy = np.array([_rotate([x, y], midpoint, rad) for (x, y) in segm_xy])
keypoint = midpoint
return segm_xy, keypoint
def _rotate_to_tpose(segments_xy):
# nose -> head (BUT nose is not at the middle point of face, e.g., face right, face left!!!)
# midhip -> torso (DONE in vertical rotation)
# elbow -> upper arm
# wrist -> lower arm
# knee -> thigh
# ankle -> calf
# valid keypoints confirmed by is_valid()
nose_keypoint = segments_xy['Head']['keypoints']['Nose']
neck_keypoint = segments_xy['Torso']['keypoints']['Neck']
rsho_keypoint = segments_xy['Torso']['keypoints']['RShoulder']
lsho_keypoint = segments_xy['Torso']['keypoints']['LShoulder']
midhip_keypoint = segments_xy['Torso']['keypoints']['MidHip']
rhip_keypoint = segments_xy['Torso']['keypoints']['RHip']
lhip_keypoint = segments_xy['Torso']['keypoints']['LHip']
# update midhip keypoint = [vertical height of torso] + [midpoint = (midhip + neck) / 2]
if 'Torso' in segments_xy and len(segments_xy['Torso']['segm_xy']) > 0:
segments_xy['Torso']['keypoints']['MidHip'] = (_euclidian(neck_keypoint, midhip_keypoint), _keypoints_midpoint(neck_keypoint, midhip_keypoint))
# buggy -> update midhip keypoint = (midhip + neck) / 2
# elongated torso <- (1) shoulders go up at both sides; (2) crotch goes down in the middle;
# segments_xy['Torso']['keypoints']['MidHip'] = _keypoints_midpoint(neck_keypoint, midhip_keypoint)
# head -> NOT use Nose, use Centroid of head_xy!!!
# ONE solution to Issue FOUR: NOSE is not at the middle point of the head!!!
# so nose keypoint = head centroid
if 'Head' in segments_xy and len(segments_xy['Head']['segm_xy']) > 0:
segm_xy, keypoint = _rotate_head_around_centroid(segm_xy=segments_xy['Head']['segm_xy'],
keypoint1_ref=neck_keypoint,
keypoint2_ref=midhip_keypoint)
segments_xy['Head']['segm_xy'] = segm_xy
segments_xy['Head']['keypoints']['Nose'] = keypoint
# Upper Limb
# Right
# wrist keypoint = lower arm midpoint
if 'RLowerArm' in segments_xy and 'RUpperArm' in segments_xy and len(segments_xy['RLowerArm']['segm_xy']) > 0 and segments_xy['RLowerArm']['keypoints']['RWrist'][2] > 0 and segments_xy['RUpperArm']['keypoints']['RElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RLowerArm']['segm_xy'],
keypoint=segments_xy['RLowerArm']['keypoints']['RWrist'],
ref_keypoint=segments_xy['RUpperArm']['keypoints']['RElbow'],
is_right=True,
is_leg=False)
segments_xy['RLowerArm']['segm_xy'] = segm_xy
segments_xy['RLowerArm']['keypoints']['RWrist'] = keypoint
# elbow keypoint = upper arm midpoint
if 'RUpperArm' in segments_xy and len(segments_xy['RUpperArm']['segm_xy']) > 0 and segments_xy['RUpperArm']['keypoints']['RElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RUpperArm']['segm_xy'],
keypoint=segments_xy['RUpperArm']['keypoints']['RElbow'],
ref_keypoint=rsho_keypoint,
is_right=True,
is_leg=False)
segments_xy['RUpperArm']['segm_xy'] = segm_xy
segments_xy['RUpperArm']['keypoints']['RElbow'] = keypoint
# Left
# wrist keypoint = lower arm midpoint
if 'LLowerArm' in segments_xy and 'LUpperArm' in segments_xy and len(segments_xy['LLowerArm']['segm_xy']) > 0 and segments_xy['LLowerArm']['keypoints']['LWrist'][2] > 0 and segments_xy['LUpperArm']['keypoints']['LElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LLowerArm']['segm_xy'],
keypoint=segments_xy['LLowerArm']['keypoints']['LWrist'],
ref_keypoint=segments_xy['LUpperArm']['keypoints']['LElbow'],
is_right=False,
is_leg=False)
segments_xy['LLowerArm']['segm_xy'] = segm_xy
segments_xy['LLowerArm']['keypoints']['LWrist'] = keypoint
# elbow keypoint = upper arm midpoint
if 'LUpperArm' in segments_xy and len(segments_xy['LUpperArm']['segm_xy']) > 0 and segments_xy['LUpperArm']['keypoints']['LElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LUpperArm']['segm_xy'],
keypoint=segments_xy['LUpperArm']['keypoints']['LElbow'],
ref_keypoint=lsho_keypoint,
is_right=False,
is_leg=False)
segments_xy['LUpperArm']['segm_xy'] = segm_xy
segments_xy['LUpperArm']['keypoints']['LElbow'] = keypoint
# Lower Limb
# Right
# ankle keypoint = calf midpoint
if 'RCalf' in segments_xy and 'RThigh' in segments_xy and len(segments_xy['RCalf']['segm_xy']) > 0 and segments_xy['RCalf']['keypoints']['RAnkle'][2] > 0 and segments_xy['RThigh']['keypoints']['RKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RCalf']['segm_xy'],
keypoint=segments_xy['RCalf']['keypoints']['RAnkle'],
ref_keypoint=segments_xy['RThigh']['keypoints']['RKnee'],
is_right=True,
is_leg=True)
segments_xy['RCalf']['segm_xy'] = segm_xy
segments_xy['RCalf']['keypoints']['RAnkle'] = keypoint
# knee keypoint = thigh midpoint
if 'RThigh' in segments_xy and len(segments_xy['RThigh']['segm_xy']) > 0 and segments_xy['RThigh']['keypoints']['RKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RThigh']['segm_xy'],
keypoint=segments_xy['RThigh']['keypoints']['RKnee'],
ref_keypoint=rhip_keypoint,
is_right=True,
is_leg=True)
segments_xy['RThigh']['segm_xy'] = segm_xy
segments_xy['RThigh']['keypoints']['RKnee'] = keypoint
# Left
# ankle keypoint = calf midpoint
if 'LCalf' in segments_xy and 'LThigh' in segments_xy and len(segments_xy['LCalf']['segm_xy']) > 0 and segments_xy['LCalf']['keypoints']['LAnkle'][2] > 0 and segments_xy['LThigh']['keypoints']['LKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LCalf']['segm_xy'],
keypoint=segments_xy['LCalf']['keypoints']['LAnkle'],
ref_keypoint=segments_xy['LThigh']['keypoints']['LKnee'],
is_right=False,
is_leg=True)
segments_xy['LCalf']['segm_xy'] = segm_xy
segments_xy['LCalf']['keypoints']['LAnkle'] = keypoint
# knee keypoint = thigh midpoint
if 'LThigh' in segments_xy and len(segments_xy['LThigh']['segm_xy']) > 0 and segments_xy['LThigh']['keypoints']['LKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LThigh']['segm_xy'],
keypoint=segments_xy['LThigh']['keypoints']['LKnee'],
ref_keypoint=lhip_keypoint,
is_right=False,
is_leg=True)
segments_xy['LThigh']['segm_xy'] = segm_xy
segments_xy['LThigh']['keypoints']['LKnee'] = keypoint
return segments_xy
def rotate_segments_xy(segm, keypoints):
# Issue ONE: cannot rotate body to [Face-front + Torso-front] view!!!
# Issue TWO: cannot have the same person -> so it can be a fat person or a thin person!!!
# *Issue THREE*: NO mapped HAND and FOOT keypoints to rotate them - hands are feet are ignored in analysis!!!
# *Issue FOUR*: NOSE is not at the middle point of the head, e.g., face right, face left, so cannot normalize HEAD!!!
# STEP 1: rotated any pose to a vertical pose, i.e., stand up, sit up, etc...
# extract original segment's x, y
segments_xy = _get_segments_xy(segm=segm, keypoints=keypoints)
# rotated segment to vertical pose, i.e., stand up, sit up, etc...
vertical_segments_xy = _rotate_to_vertical_pose(segments_xy=segments_xy)
# STEP 2: rotate specific segment further to t-pose
tpose_segments_xy = _rotate_to_tpose(segments_xy=vertical_segments_xy)
return tpose_segments_xy
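# Minimal end-to-end sketch (comments only; `openpose_keypoints` is an assumed
# Body-25 keypoint list loaded elsewhere):
#     mask, segm = extract_segm(result_densepose)           # coarse segmentation
#     keypoints = dict(zip(JOINT_ID, openpose_keypoints))   # Body-25 keypoints
#     tpose = rotate_segments_xy(segm, keypoints)
# tpose['Torso']['segm_xy'] then holds the torso pixels rotated into an upright
# T-pose frame, ready for the normalisation steps below.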
def _euclidian(point1, point2):
return np.sqrt((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2)
def _remove_outlier(segm_xy):
# outlier factor
factor = 2
# mean of [x, y]
xy_mean = np.mean(segm_xy, axis=0)
# mean distance between [x, y] and mean of [x, y]
distance_mean = np.mean([_euclidian(xy, xy_mean) for xy in segm_xy])
# remove outliers from segm_xy
segm_xy_without_outliers = [xy for xy in segm_xy if _euclidian(xy, xy_mean) <= distance_mean * factor]
return segm_xy_without_outliers
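# Quick sketch (comments only; the numbers are illustrative): points further
# than 2x the mean distance from the mean position are dropped, e.g.
#     _remove_outlier([(0, 0), (1, 0), (0, 1), (1, 1), (100, 100)])
# keeps the four clustered points and discards (100, 100).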
def _translate_and_scale_segm_to_convex(image, segm_id, segm_xy, keypoint, ref_point, is_man, is_rect_symmetrical, segm_symmetry_dict, scaler):
# test each segment
# print('Segment ID:', segm_id)
# remove outliers
print('Before removing outliers:', len(segm_xy))
segm_xy = np.array(_remove_outlier(segm_xy=segm_xy)).astype(int)
print('After removing outliers:', len(segm_xy))
min_x, min_y = np.min(segm_xy, axis=0).astype(int)
max_x, max_y = np.max(segm_xy, axis=0).astype(int)
margin = 5
w = int(max_x - min_x + margin*2)
h = int(max_y - min_y + margin*2)
img_bg = np.empty((h, w, 4), np.uint8)
img_bg.fill(255)
img_bg[:, :, 3] = 0 # alpha channel = 0 -> transparent
# fill the segment with the segment color
contours = [[int(x - min_x + margin), int(y - min_y + margin)] for x, y in segm_xy]
# option 1 - convex hull of [x, y]
contours = np.array(contours, np.int32)
cv2.fillConvexPoly(img_bg, cv2.convexHull(contours), color=COARSE_TO_COLOR[segm_id])
# option 2 - dots on [x, y]
# for x, y in contours:
# cv2.circle(img_bg, (x, y), color=COARSE_TO_COLOR[segm_id], radius=2, thickness=-2)
# assumption: head_radius = 31 -> head_height = 31*2 = 62 -> men; 58 -> women
if segm_id == 'Head' and h > 0:
if is_man:
scaler = 62 / h
else:
scaler = 58 / h
    img_bg = cv2.resize(img_bg, (int(w * scaler), int(h * scaler)), interpolation=cv2.INTER_LINEAR)
h, w, _ = img_bg.shape
# midpoint [x, y] in the scaled coordinates of img_bg
# distance between the center point and the left/upper boundaries
midpoint_x, midpoint_y = ((np.array(keypoint)[0:2] - np.array([min_x, min_y]) + np.array([margin, margin])) * scaler).astype(int)
x, y = ref_point
min_x = int(x - midpoint_x)
max_x = int(x + w - midpoint_x)
min_y = int(y - midpoint_y)
max_y = int(y + h - midpoint_y)
cond_bg = img_bg[:, :, 3] > 0 # condition for already-drawn segment pixels
try:
image[min_y:max_y, min_x:max_x, :][cond_bg] = img_bg[cond_bg]
except:
if segm_id == 'Head':
return scaler
# test each segment
# cv2.circle(img_bg, (midpoint_x, midpoint_y), radius=5,color=(255, 255, 0), thickness=-1)
# cv2.imshow('test', img_bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if segm_id == 'Head':
return scaler, None
else:
return None
def _symmetrize_rect_segm(segm_id, w, h, midpoint_x, midpoint_y, segm_symmetry_dict):
if segm_id == 'Head':
segm_symmetry_dict['Head'] = (w, h)
else:
if midpoint_x < w/2:
w = int((w - midpoint_x) * 2)
else:
w = int(midpoint_x * 2)
if midpoint_y < h/2:
h = int((h - midpoint_y) * 2)
else:
h = int(midpoint_y * 2)
if segm_id == 'Torso':
segm_symmetry_dict['Torso'] = (w, h)
elif segm_id == 'RUpperArm':
segm_symmetry_dict['RUpperArm'] = (w, h)
elif segm_id == 'RLowerArm':
segm_symmetry_dict['RLowerArm'] = (w, h)
elif segm_id == 'LUpperArm':
if 'RUpperArm' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RUpperArm']
if w < ref_w:
segm_symmetry_dict['LUpperArm'] = segm_symmetry_dict['RUpperArm']
else:
segm_symmetry_dict['LUpperArm'] = (w, h)
segm_symmetry_dict['RUpperArm'] = (w, h)
else:
segm_symmetry_dict['LUpperArm'] = (w, h)
segm_symmetry_dict['RUpperArm'] = (w, h)
elif segm_id == 'LLowerArm':
if 'RLowerArm' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RLowerArm']
if w < ref_w:
segm_symmetry_dict['LLowerArm'] = segm_symmetry_dict['RLowerArm']
else:
segm_symmetry_dict['LLowerArm'] = (w, h)
segm_symmetry_dict['RLowerArm'] = (w, h)
else:
segm_symmetry_dict['LLowerArm'] = (w, h)
segm_symmetry_dict['RLowerArm'] = (w, h)
elif segm_id == 'RThigh':
segm_symmetry_dict['RThigh'] = (w, h)
elif segm_id == 'RCalf':
segm_symmetry_dict['RCalf'] = (w, h)
elif segm_id == 'LThigh':
if 'RThigh' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RThigh']
if h < ref_h:
segm_symmetry_dict['LThigh'] = segm_symmetry_dict['RThigh']
else:
segm_symmetry_dict['LThigh'] = (w, h)
segm_symmetry_dict['RThigh'] = (w, h)
else:
segm_symmetry_dict['LThigh'] = (w, h)
segm_symmetry_dict['RThigh'] = (w, h)
elif segm_id == 'LCalf':
if 'RCalf' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RCalf']
if h < ref_h:
segm_symmetry_dict['LCalf'] = segm_symmetry_dict['RCalf']
else:
segm_symmetry_dict['LCalf'] = (w, h)
segm_symmetry_dict['RCalf'] = (w, h)
else:
segm_symmetry_dict['LCalf'] = (w, h)
segm_symmetry_dict['RCalf'] = (w, h)
def _draw_symmetrical_rect_segm(image, segm_id, w_and_h, ref_point, update_dict=True):
w, h = w_and_h
# update output_dict
if update_dict:
global output_dict
output_dict[segm_id + '_w'] = w
output_dict[segm_id + '_h'] = h
img_bg = np.empty((h, w, 4), np.uint8)
img_bg.fill(255)
img_bg[:, :] = COARSE_TO_COLOR[segm_id]
midpoint_x = w / 2
midpoint_y = h / 2
x, y = ref_point
min_x = int(x - midpoint_x)
max_x = int(x + midpoint_x)
min_y = int(y - midpoint_y)
max_y = int(y + midpoint_y)
try:
added_image = cv2.addWeighted(image[min_y:max_y, min_x:max_x, :], 0.1, img_bg, 0.9, 0)
image[min_y:max_y, min_x:max_x, :] = added_image
except:
pass
def _translate_and_scale_segm_to_rect(image, segm_id, segm_xy, keypoint, ref_point, is_man, is_rect_symmetrical, segm_symmetry_dict, scaler):
# test each segment
print('Segment ID:', segm_id)
# remove outliers
print('Before removing outliers:', len(segm_xy))
segm_xy = np.array(_remove_outlier(segm_xy=segm_xy)).astype(int)
print('After removing outliers:', len(segm_xy))
    min_x, min_y = np.min(segm_xy, axis=0)
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import OrderedDict
from tensorflow.python.keras.models import load_model
from pkg_resources import resource_filename
from transomaly.prepare_input import PrepareInputArrays
from transomaly.loss_functions import mean_squared_error, chisquare_loss, mean_squared_error_over_error
matplotlib.use('TkAgg')
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
COLPB = {'g': 'tab:green', 'r': 'tab:red'}
MARKPB = {'g': 'o', 'r': 's', 'z': 'd'}
ALPHAPB = {'g': 0.3, 'r': 1., 'z': 1}
CLASS_COLOR = {'SNIa-norm': 'tab:green', 'SNIbc': 'tab:orange', 'SNII': 'tab:blue', 'SNIIn': 'blue',
'SNIa-91bg': 'tab:red', 'SNIa-x': 'bisque', 'point-Ia': 'tab:brown', 'Kilonova': '#aaffc3',
'SLSN-I': 'tab:olive', 'PISN': 'tab:cyan', 'ILOT': '#FF1493', 'CART': 'navy', 'TDE': 'tab:pink',
'AGN': 'tab:purple'}
# npred = 7
# model_filepath_onepoint_infuture = "/Users/danmuth/OneDrive - University of Cambridge/PycharmProjects/transomaly/plots/model__ci()_ns1_c(1,)/keras_model_epochs300_onepoint_pred7timesteps_infuture_normalised_predict_last49_timesteps_nodropout_100lstmneurons/keras_model_epochs300_onepoint_pred7timesteps_infuture_normalised_predict_last49_timesteps_nodropout_100lstmneurons.hdf5"
model_filepath = "/Users/danmuth/OneDrive - University of Cambridge/PycharmProjects/transomaly/plots/model__ci()_ns1_c(1,)/keras_model_epochs500_pred2timesteps_normalised_predict_last49_timesteps_nodropout_100lstmneurons/keras_model_epochs500_pred2timesteps_normalised_predict_last49_timesteps_nodropout_100lstmneurons.hdf5"
model = load_model(model_filepath, custom_objects={'loss': mean_squared_error()})
passbands = ('g','r')
contextual_info = ()
X = np.array([np.array([[0.09158034, 0.07176773],
[0.09008677, 0.07137485],
[0.08917016, 0.0727186 ],
[0.09029362, 0.07340094],
[0.08999084, 0.07224263],
[0.08900606, 0.07197019],
[0.08739904, 0.07095805],
[0.08682939, 0.07071227],
[0.08567506, 0.07031706],
[0.08386102, 0.07021409],
[0.0836062 , 0.0698832 ],
[0.08332578, 0.07050169],
[0.08397429, 0.06937913],
[0.0860095 , 0.0716971 ],
[0.09076107, 0.07780995],
[0.09871331, 0.08934081],
[0.11663096, 0.11012718],
[0.15700709, 0.15327507],
[0.23417453, 0.22872161],
[0.35862168, 0.33963544],
[0.51792838, 0.48456903],
[0.68616083, 0.64401235],
[0.81866526, 0.79203408],
[0.90768435, 0.89154676],
[0.93726188, 0.94250969],
[0.92312219, 0.95238174],
[0.85896026, 0.93542825],
[0.74499556, 0.87812468],
[0.62114839, 0.78786493],
[0.50529031, 0.6932507 ],
[0.40645818, 0.6036984 ],
[0.32993884, 0.52479434],
[0.27163636, 0.45807863],
[0.22780797, 0.40140124],
[0.19817062, 0.35339631],
[0.17674938, 0.31370617],
[0.16142444, 0.27973599],
[0.14923072, 0.25118097],
[0.13910386, 0.22674725],
[0.13288632, 0.20717057],
[0.12727512, 0.191151 ],
[0.12253798, 0.17674126],
[0.11748478, 0.16368262],
[0.11428479, 0.15247652],
[0.110435 , 0.14285344],
[0.10726069, 0.13410306],
[0.10317767, 0.12546451],
[0.10019489, 0.11729226],
[0.09654251, 0.11025106]])])
y = np.array([np.array([[0.09008677, 0.07137485],
[0.08917016, 0.0727186 ],
[0.09029362, 0.07340094],
[0.08999084, 0.07224263],
[0.08900606, 0.07197019],
[0.08739904, 0.07095805],
[0.08682939, 0.07071227],
[0.08567506, 0.07031706],
[0.08386102, 0.07021409],
[0.0836062 , 0.0698832 ],
[0.08332578, 0.07050169],
[0.08397429, 0.06937913],
[0.0860095 , 0.0716971 ],
[0.09076107, 0.07780995],
[0.09871331, 0.08934081],
[0.11663096, 0.11012718],
[0.15700709, 0.15327507],
[0.23417453, 0.22872161],
[0.35862168, 0.33963544],
[0.51792838, 0.48456903],
[0.68616083, 0.64401235],
[0.81866526, 0.79203408],
[0.90768435, 0.89154676],
[0.93726188, 0.94250969],
[0.92312219, 0.95238174],
[0.85896026, 0.93542825],
[0.74499556, 0.87812468],
[0.62114839, 0.78786493],
[0.50529031, 0.6932507 ],
[0.40645818, 0.6036984 ],
[0.32993884, 0.52479434],
[0.27163636, 0.45807863],
[0.22780797, 0.40140124],
[0.19817062, 0.35339631],
[0.17674938, 0.31370617],
[0.16142444, 0.27973599],
[0.14923072, 0.25118097],
[0.13910386, 0.22674725],
[0.13288632, 0.20717057],
[0.12727512, 0.191151 ],
[0.12253798, 0.17674126],
[0.11748478, 0.16368262],
[0.11428479, 0.15247652],
[0.110435 , 0.14285344],
[0.10726069, 0.13410306],
[0.10317767, 0.12546451],
[0.10019489, 0.11729226],
[0.09654251, 0.11025106],
[0.09630747, 0.10437187]])])
timesX = np.array([np.array([-70., -67., -64., -61., -58., -55., -52., -49., -46., -43., -40.,
-37., -34., -31., -28., -25., -22., -19., -16., -13., -10., -7.,
-4., -1., 2., 5., 8., 11., 14., 17., 20., 23., 26.,
29., 32., 35., 38., 41., 44., 47., 50., 53., 56., 59.,
62., 65., 68., 71., 74., 77.])])
objids = np.array(['median_Ia'])
# coding: utf-8
# # Assignment 2
#
# Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.
#
# An NOAA dataset has been stored in the file `data/C2A2_data/BinnedCsvs_d100/4e86d2106d0566c6ad9843d882e72791333b08be3d647dcae4f4b110.csv`. The data for this assignment comes from a subset of The National Centers for Environmental Information (NCEI) [Daily Global Historical Climatology Network](https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt) (GHCN-Daily). The GHCN-Daily is comprised of daily climate records from thousands of land surface stations across the globe.
#
# Each row in the assignment datafile corresponds to a single observation.
#
# The following variables are provided to you:
#
# * **id** : station identification code
# * **date** : date in YYYY-MM-DD format (e.g. 2012-01-24 = January 24, 2012)
# * **element** : indicator of element type
# * TMAX : Maximum temperature (tenths of degrees C)
# * TMIN : Minimum temperature (tenths of degrees C)
# * **value** : data value for element (tenths of degrees C)
#
# For this assignment, you must:
#
# 1. Read the documentation and familiarize yourself with the dataset, then write some python code which returns a line graph of the record high and record low temperatures by day of the year over the period 2005-2014. The area between the record high and record low temperatures for each day should be shaded.
# 2. Overlay a scatter of the 2015 data for any points (highs and lows) for which the ten year record (2005-2014) record high or record low was broken in 2015.
# 3. Watch out for leap days (i.e. February 29th), it is reasonable to remove these points from the dataset for the purpose of this visualization.
# 4. Make the visual nice! Leverage principles from the first module in this course when developing your solution. Consider issues such as legends, labels, and chart junk.
#
# The data you have been given is near **None, None, Singapore**, and the stations the data comes from are shown on the map below.
# In[1]:
import matplotlib.pyplot as plt
import mplleaflet
import pandas as pd
def leaflet_plot_stations(binsize, hashid):
df = pd.read_csv('data/C2A2_data/BinSize_d{}.csv'.format(binsize))
station_locations_by_hash = df[df['hash'] == hashid]
lons = station_locations_by_hash['LONGITUDE'].tolist()
lats = station_locations_by_hash['LATITUDE'].tolist()
plt.figure(figsize=(8,8))
plt.scatter(lons, lats, c='r', alpha=0.7, s=200)
return mplleaflet.display()
leaflet_plot_stations(100,'4e86d2106d0566c6ad9843d882e72791333b08be3d647dcae4f4b110')
# In[2]:
df = pd.read_csv('data/C2A2_data/BinnedCsvs_d100/4e86d2106d0566c6ad9843d882e72791333b08be3d647dcae4f4b110.csv')
# In[3]:
df.sort_values(['ID', 'Date']).head()
# In[4]:
df['Year'], df['Month-Date'] = zip(*df['Date'].apply(lambda x: (x[:4], x[5:])))
df = df[df['Month-Date'] != '02-29']
# In[5]:
import numpy as np
temp_min = df[(df['Element'] == 'TMIN') & (df['Year'] != '2015')].groupby('Month-Date').aggregate({'Data_Value':np.min})
temp_max = df[(df['Element'] == 'TMAX') & (df['Year'] != '2015')].groupby('Month-Date').aggregate({'Data_Value':np.max})
# In[6]:
temp_min.head()
# In[7]:
temp_min_15 = df[(df['Element'] == 'TMIN') & (df['Year'] == '2015')].groupby('Month-Date').aggregate({'Data_Value':np.min})
temp_max_15 = df[(df['Element'] == 'TMAX') & (df['Year'] == '2015')].groupby('Month-Date').aggregate({'Data_Value':np.max})
# In[8]:
broken_min = np.where(temp_min_15['Data_Value'] < temp_min['Data_Value'])
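# A sketch of the matching record-high comparison (an assumed next step, not
# part of the original notebook):
#     broken_max = np.where(temp_max_15['Data_Value'] > temp_max['Data_Value'])
# broken_max then indexes the days of 2015 whose highs beat the 2005-2014
# record, mirroring broken_min for the record lows.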
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import time
import heapq
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import pickle
import pdb
from scipy.special import expit
from sklearn.metrics import confusion_matrix
from tensorflow.keras.models import load_model
from sys import stdout
from tensorflow.keras.callbacks import Callback
from collections import defaultdict, namedtuple
from multiprocessing import Pool
from random import sample, shuffle
from glob import glob
class F1Score(Callback):
# this is really heavy handed!
# have to evaluate the set twice
def __init__(self, validation_data, n_classes, model_out_path, batch_size=4, two_headed_net=False):
super(F1Score, self).__init__()
self.validation_data = validation_data
self.batch_size = batch_size
self.n_classes = n_classes
self.model_out_path = os.path.splitext(model_out_path)[0]
self.two_headed_net = two_headed_net
if self.two_headed_net:
self.model_out_path += "epoch-{}-f1-{}.h5"
else:
self.model_out_path += "epoch-{}-f1-{}.h5"
self.f1_scores = []
def on_train_begin(self, logs={}):
pass
def on_epoch_end(self, epochs, logs):
# 5.4.1 For each validation batch
cmat, prec, recall = confusion_matrix_from_generator(self.validation_data,
batch_size=self.batch_size, model=self.model, n_classes=self.n_classes,
multi_output=self.two_headed_net)
print('n pixels per class:', np.sum(cmat, axis=1))
print(prec)
print(recall)
precision_irrigated = prec[0]
recall_irrigated = recall[0]
f1 = 2*(precision_irrigated * recall_irrigated) / (precision_irrigated + recall_irrigated)
if np.isnan(f1):
return
outp = self.model_out_path.format(epochs, f1)
print('saving', outp)
if not os.path.isfile(outp):
self.model.save(outp) # maybe get some space savings
return
def softmax(arr, count_dim=0):
arr = np.exp(arr)
    arr /= np.sum(arr, axis=count_dim, keepdims=True)
    return arr
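# Quick sketch (comments only; the numbers are illustrative):
#     softmax(np.array([[1.0, 1.0], [0.0, 2.0]]), count_dim=0)
# normalises each column to sum to 1; the first column becomes ~[0.73, 0.27]
# because exp(1) / (exp(1) + exp(0)) ~= 0.73.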
import numpy as np
import pytest
import numpy.testing as npt
from collections import OrderedDict
from pulse2percept.implants import (DiskElectrode, PointSource,
ElectrodeArray, ElectrodeGrid)
def test_ElectrodeArray():
with pytest.raises(TypeError):
ElectrodeArray("foo")
with pytest.raises(TypeError):
ElectrodeArray(OrderedDict({'A1': 0}))
with pytest.raises(TypeError):
ElectrodeArray([0])
# Empty array:
earray = ElectrodeArray([])
npt.assert_equal(earray.n_electrodes, 0)
# npt.assert_equal(earray[0], None)
npt.assert_equal(earray['A01'], None)
with pytest.raises(TypeError):
earray[PointSource(0, 0, 0)]
ElectrodeArray([])
# A single electrode:
earray = ElectrodeArray(PointSource(0, 1, 2))
npt.assert_equal(earray.n_electrodes, 1)
npt.assert_equal(isinstance(earray[0], PointSource), True)
npt.assert_equal(isinstance(earray[[0]], list), True)
npt.assert_equal(isinstance(earray[[0]][0], PointSource), True)
npt.assert_almost_equal(earray[0].x, 0)
npt.assert_almost_equal(earray[0].y, 1)
npt.assert_almost_equal(earray[0].z, 2)
# Indexing:
ps1, ps2 = PointSource(0, 0, 0), PointSource(1, 1, 1)
earray = ElectrodeArray({'A01': ps1, 'D07': ps2})
npt.assert_equal(earray['A01'], ps1)
npt.assert_equal(earray['D07'], ps2)
# Slots:
npt.assert_equal(hasattr(earray, '__slots__'), True)
npt.assert_equal(hasattr(earray, '__dict__'), False)
def test_ElectrodeArray_add_electrode():
earray = ElectrodeArray([])
npt.assert_equal(earray.n_electrodes, 0)
with pytest.raises(TypeError):
earray.add_electrode('A01', ElectrodeArray([]))
# Add an electrode:
key0 = 'A04'
earray.add_electrode(key0, PointSource(0, 1, 2))
npt.assert_equal(earray.n_electrodes, 1)
# Both numeric and string index should work:
for key in [key0, 0]:
npt.assert_equal(isinstance(earray[key], PointSource), True)
npt.assert_almost_equal(earray[key].x, 0)
npt.assert_almost_equal(earray[key].y, 1)
npt.assert_almost_equal(earray[key].z, 2)
with pytest.raises(ValueError):
# Can't add the same electrode twice:
earray.add_electrode(key0, PointSource(0, 1, 2))
# Add another electrode:
key1 = 'A01'
earray.add_electrode(key1, DiskElectrode(4, 5, 6, 7))
npt.assert_equal(earray.n_electrodes, 2)
# Both numeric and string index should work:
for key in [key1, 1]:
npt.assert_equal(isinstance(earray[key], DiskElectrode), True)
npt.assert_almost_equal(earray[key].x, 4)
npt.assert_almost_equal(earray[key].y, 5)
npt.assert_almost_equal(earray[key].z, 6)
npt.assert_almost_equal(earray[key].r, 7)
# We can also get a list of electrodes:
for keys in [[key0, key1], [0, key1], [key0, 1], [0, 1]]:
selected = earray[keys]
npt.assert_equal(isinstance(selected, list), True)
npt.assert_equal(isinstance(selected[0], PointSource), True)
npt.assert_equal(isinstance(selected[1], DiskElectrode), True)
def test_ElectrodeArray_remove_electrode():
earray1 = ElectrodeArray([])
earray2 = ElectrodeArray([])
npt.assert_equal(earray1.n_electrodes, 0)
# Can't remove electrodes from empty electrodeArray
with pytest.raises(ValueError):
earray1.remove_electrode(None)
with pytest.raises(ValueError):
earray1.remove_electrode("foo")
key = [0] * 4
key[0] = 'D03'
key[1] = 'A02'
key[2] = 'F10'
key[3] = 'E12'
earray1.add_electrode(key[0], PointSource(0, 1, 2))
earray1.add_electrode(key[1], PointSource(3, 4, 5))
earray1.add_electrode(key[2], PointSource(6, 7, 8))
earray1.add_electrode(key[3], PointSource(9, 10, 11))
npt.assert_equal(earray1.n_electrodes, 4)
earray2.add_electrode(key[0], PointSource(0, 1, 2))
earray2.add_electrode(key[1], PointSource(3, 4, 5))
earray2.add_electrode(key[2], PointSource(6, 7, 8))
earray2.add_electrode(key[3], PointSource(9, 10, 11))
npt.assert_equal(earray2.n_electrodes, 4)
# Remove one electrode key[1] from the electrodeArray
earray1.remove_electrode(key[0])
npt.assert_equal(earray1.n_electrodes, 3)
# Can't remove an electrode that has been removed
with pytest.raises(ValueError):
earray1.remove_electrode(key[0])
# List keeps order:
npt.assert_equal(earray1[0], earray1[key[1]])
npt.assert_equal(earray1[1], earray1[key[2]])
npt.assert_equal(earray1[2], earray1[key[3]])
import unittest, os
import numpy as np
import tensorflow as tf
from ovejero import bnn_alexnet
from scipy.stats import multivariate_normal
# Eliminate TF warning in tests
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class BNNTests(unittest.TestCase):
def setUp(self):
self.random_seed = 1234
tf.random.set_seed(self.random_seed)
np.random.seed(self.random_seed)
def tearDown(self):
# Make sure we don't have any models lingering in memory.
tf.keras.backend.clear_session()
def test_AlwaysDropout(self):
# Test that the implementation of Always dropout behaves as expected.
# Start with no dropout and make sure that behaves how you want it to.
input_layer = tf.ones((200,200,200))
dropout_rate = 0
d_layer = bnn_alexnet.AlwaysDropout(dropout_rate)
output_layer = d_layer(input_layer)
np.testing.assert_equal(input_layer.numpy(),output_layer.numpy())
dropout_rate = 0.1
d_layer = bnn_alexnet.AlwaysDropout(dropout_rate)
output_layer = d_layer(input_layer)
# Test that the two arrays aren't equal.
self.assertGreater(np.mean(np.abs(input_layer.numpy()-output_layer.numpy()
)),0)
# Test that the mean value hasn't changed (remember we divide the output
# by the dropout rate so the mean is unchanged)
self.assertAlmostEqual(np.mean(input_layer.numpy()),
np.mean(output_layer.numpy()),places=3)
# Test that the median value is as expected.
self.assertAlmostEqual(np.median(output_layer.numpy()),1/0.9,places=5)
# Repeat the above tests for other dropout rates.
dropout_rate = 0.5
d_layer = bnn_alexnet.AlwaysDropout(dropout_rate)
output_layer = d_layer(input_layer)
self.assertGreater(np.mean(np.abs(input_layer.numpy()-output_layer.numpy()
)),0)
self.assertAlmostEqual(np.mean(input_layer.numpy()),
np.mean(output_layer.numpy()),places=2)
dropout_rate = 0.9
d_layer = bnn_alexnet.AlwaysDropout(dropout_rate)
output_layer = d_layer(input_layer)
self.assertGreater(np.mean(np.abs(input_layer.numpy()-output_layer.numpy()
)),0)
self.assertAlmostEqual(np.mean(input_layer.numpy()),
np.mean(output_layer.numpy()),places=2)
self.assertEqual(np.median(output_layer.numpy()),0.0)
def test_ConcreteDropout(self):
# Test that our implementation of ConcreteDropout works as expected.
output_dim = 100
activation = 'relu'
kernel_regularizer = 1e-6
dropout_regularizer = 1e-5
init_min = 0.1
init_max = 0.1
input_shape = (None,200)
cd_layer = bnn_alexnet.ConcreteDropout(output_dim,activation=activation,
kernel_regularizer=kernel_regularizer,
dropout_regularizer=dropout_regularizer, init_min=init_min,
init_max=init_max)
cd_layer.build(input_shape=input_shape)
# Check that all of the weights have the right shapes
kernel = cd_layer.weights[0]
bias = cd_layer.weights[1]
p_logit = cd_layer.weights[2]
self.assertListEqual(list(kernel.shape),[200,100])
self.assertListEqual(list(bias.shape),[100])
self.assertListEqual(list(p_logit.shape),[1])
# Check that the initializations worked as we wanted them to
self.assertEqual(np.sum(bias.numpy()),0)
self.assertEqual(p_logit.numpy(),np.log(0.1)-np.log(1-0.1))
# Check that the losses for the layer is what we would expect for
# concrete dropout.
p_logit_reg = cd_layer.losses[0].numpy()
kernel_reg = cd_layer.losses[1].numpy()
# We know what we set p to
p = 0.1
p_logit_correct = p * np.log(p) + (1-p)*np.log(1-p)
p_logit_correct *= dropout_regularizer * 200
self.assertAlmostEqual(p_logit_reg, p_logit_correct)
kernel_correct = kernel_regularizer * np.sum(np.square(
kernel.numpy())) / (1-p)
self.assertAlmostEqual(kernel_reg, kernel_correct)
# Now check that the call function doesn't return the same value each
# time
false_input = tf.constant((np.random.rand(1,200)),dtype=tf.float32)
output1 = cd_layer(false_input).numpy()
output2 = cd_layer(false_input).numpy()
self.assertGreater(np.sum(np.abs(output1-output2)),1)
def test_SpatialConcreteDropout(self):
# Test that our implementation of ConcreteDropout works as expected.
filters = 64
kernel_size = (5,5)
activation = 'relu'
kernel_regularizer = 1e-6
dropout_regularizer = 1e-5
init_min = 0.1
init_max = 0.1
input_shape = (None,20,20,64)
cd_layer = bnn_alexnet.SpatialConcreteDropout(filters, kernel_size,
activation=activation,
kernel_regularizer=kernel_regularizer,
dropout_regularizer=dropout_regularizer, init_min=init_min,
init_max=init_max)
cd_layer.build(input_shape=input_shape)
# Check that all of the weights have the right shapes
kernel = cd_layer.weights[0]
bias = cd_layer.weights[1]
p_logit = cd_layer.weights[2]
self.assertListEqual(list(kernel.shape),[5,5,64,64])
self.assertListEqual(list(bias.shape),[64])
self.assertListEqual(list(p_logit.shape),[1])
# Check that the initializations worked as we wanted them to
self.assertEqual(np.sum(bias.numpy()),0)
self.assertEqual(p_logit.numpy(),np.log(0.1)-np.log(1-0.1))
# Check that the losses for the layer is what we would expect for
# concrete dropout.
p_logit_reg = cd_layer.losses[0].numpy()
kernel_reg = cd_layer.losses[1].numpy()
# We know what we set p to
p = 0.1
p_logit_correct = p * np.log(p) + (1-p)*np.log(1-p)
p_logit_correct *= dropout_regularizer * 64
self.assertAlmostEqual(p_logit_reg, p_logit_correct)
kernel_correct = kernel_regularizer * np.sum(np.square(
kernel.numpy())) / (1-p)
self.assertAlmostEqual(kernel_reg, kernel_correct)
# Now check that the call function doesn't return the same value each
# time
false_input = tf.constant((np.random.rand(1,20,20,64)),dtype=tf.float32)
output1 = cd_layer(false_input).numpy()
output2 = cd_layer(false_input).numpy()
self.assertGreater(np.sum(np.abs(output1-output2)),1)
def test_concrete_alexnet(self):
# Test that the models initialized agree with what we intended
layer_names = ['input','spatial_concrete_dropout','max_pooling2d',
'spatial_concrete_dropout','max_pooling2d',
'spatial_concrete_dropout','spatial_concrete_dropout',
'spatial_concrete_dropout','max_pooling2d','flatten',
'concrete_dropout','concrete_dropout','concrete_dropout']
image_size = (100,100,1)
num_params = 8
model = bnn_alexnet.concrete_alexnet(image_size, num_params,
kernel_regularizer=1e-6,dropout_regularizer=1e-5)
input_shapes = [[],(100,100,1),(48,48,64),
(24,24,64),(24,24,192),(12,12,192),(12,12,384),(12,12,384),
(12,12,256),(6,6,256),(9216,),(4096,),(4096,)]
output_shapes = [[]]+input_shapes[2:] + [(num_params,)]
l_i = 0
# All I can really check is that the layers are of the right type and
# have the right shapes
for layer in model.layers:
self.assertTrue(layer_names[l_i] in layer.name)
self.assertEqual(layer.dtype,tf.float32)
self.assertEqual(layer.input_shape[1:],input_shapes[l_i])
self.assertEqual(layer.output_shape[1:],output_shapes[l_i])
# Check that all the concrete dropout layer except the last have
# a ReLU activation function.
if 'concrete' in layer.name and l_i < len(model.layers)-1:
self.assertEqual(layer.activation,tf.keras.activations.relu)
l_i += 1
def test_dropout_alexnet(self):
# Test that the models initialized agree with what we intended
layer_names = ['input','always_dropout','conv2d','max_pooling2d',
'always_dropout','conv2d','max_pooling2d','always_dropout',
'conv2d','always_dropout','conv2d','always_dropout',
'conv2d','max_pooling2d','flatten','always_dropout','dense',
'always_dropout','dense','always_dropout','dense']
image_size = (100,100,1)
num_params = 8
# Kernel regularizer and dropout rate
kr = 1e-6
dr = 0.1
model = bnn_alexnet.dropout_alexnet(image_size, num_params,
kernel_regularizer=kr,dropout_rate=dr)
input_shapes = [[],(100,100,1),(100,100,1),(48,48,64),
(24,24,64),(24,24,64),(24,24,192),(12,12,192),(12,12,192),
(12,12,384),(12,12,384),(12,12,384),(12,12,384),(12,12,256),
(6,6,256),(9216,),(9216,),(4096,),(4096,),(4096,),(4096,)]
output_shapes = [[]]+input_shapes[2:] + [(num_params,)]
# All I can really check is that the layers are of the right type and
# have the right shapes
for l_i, layer in enumerate(model.layers):
self.assertTrue(layer_names[l_i] in layer.name)
self.assertEqual(layer.dtype,tf.float32)
self.assertEqual(layer.input_shape[1:],input_shapes[l_i])
self.assertEqual(layer.output_shape[1:],output_shapes[l_i])
# Check that all the concrete dropout layer except the last have
# a ReLU activation function.
if 'conv2d' in layer.name:
self.assertEqual(layer.activation,tf.keras.activations.relu)
self.assertEqual(layer.kernel_regularizer.l2,np.array(kr*(1-dr),
dtype=np.float32))
if 'dense' in layer.name and l_i < len(model.layers)-1:
self.assertEqual(layer.activation,tf.keras.activations.relu)
self.assertEqual(layer.kernel_regularizer.l2,np.array(kr*(1-dr),
dtype=np.float32))
# Repeat the test for dropout of 0
layer_names = ['input','conv2d','max_pooling2d','conv2d',
'max_pooling2d','conv2d','conv2d','conv2d','max_pooling2d','flatten',
'dense','dense','dense']
image_size = (100,100,1)
num_params = 8
dr = 0.0
model = bnn_alexnet.dropout_alexnet(image_size, num_params,
kernel_regularizer=kr,dropout_rate=dr)
input_shapes = [[],(100,100,1),(48,48,64),
(24,24,64),(24,24,192),(12,12,192),
(12,12,384),(12,12,384),(12,12,256),
(6,6,256),(9216,),(4096,),(4096,)]
output_shapes = [[]]+input_shapes[2:] + [(num_params,)]
# All I can really check is that the layers are of the right type and
# have the right shapes
for l_i, layer in enumerate(model.layers):
self.assertTrue(layer_names[l_i] in layer.name)
self.assertEqual(layer.dtype,tf.float32)
self.assertEqual(layer.input_shape[1:],input_shapes[l_i])
self.assertEqual(layer.output_shape[1:],output_shapes[l_i])
# Check that all the concrete dropout layer except the last have
# a ReLU activation function.
if 'conv2d' in layer.name:
self.assertEqual(layer.activation,tf.keras.activations.relu)
self.assertEqual(layer.kernel_regularizer.l2,np.array(kr*(1-dr),
dtype=np.float32))
if 'dense' in layer.name and l_i < len(model.layers)-1:
self.assertEqual(layer.activation,tf.keras.activations.relu)
self.assertEqual(layer.kernel_regularizer.l2,np.array(kr*(1-dr),
dtype=np.float32))
class LensingLossFunctionsTests(unittest.TestCase):
def setUp(self):
# Set a seed to make sure that the behaviour of all the test functions
# is consistent.
np.random.seed(2)
def test_mse_loss(self):
# Test that for a variety of number of parameters and bnn types, the
# algorithm always returns the MSE loss.
flip_pairs = []
for num_params in range(1,20):
# Diagonal covariance
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
y_true = np.random.randn(num_params).reshape(1,-1)
y_pred = np.random.randn(num_params*2).reshape(1,-1)
mse_tensor = loss_class.mse_loss(tf.constant(y_true,dtype=tf.float32),
tf.constant(y_pred,dtype=tf.float32))
self.assertAlmostEqual(mse_tensor.numpy()[0],np.mean(np.square(
y_true-y_pred[:,:num_params])),places=5)
# Full covariance
y_true = np.random.randn(num_params).reshape(1,-1)
y_pred = np.random.randn(int(num_params*(num_params+1)/2)).reshape(
1,-1)
mse_tensor = loss_class.mse_loss(tf.constant(y_true,dtype=tf.float32),
tf.constant(y_pred,dtype=tf.float32))
self.assertAlmostEqual(mse_tensor.numpy()[0],np.mean(np.square(
y_true-y_pred[:,:num_params])),places=5)
# GMM two matrices full covariance
y_true = np.random.randn(num_params).reshape(1,-1)
y_pred = np.random.randn(2*(num_params + int(
num_params*(num_params+1)/2))+1).reshape(1,-1)
mse_tensor = loss_class.mse_loss(tf.constant(y_true,dtype=tf.float32),
tf.constant(y_pred,dtype=tf.float32))
self.assertAlmostEqual(mse_tensor.numpy()[0],np.mean(np.square(
y_true-y_pred[:,:num_params])),places=5)
# Now an explicit test that flip_pairs is working
flip_pairs = [[1,2]]
num_params = 5
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
y_true = np.ones((4,num_params))
y_pred = np.ones((4,num_params))
y_pred[:,1:3] *= -1
mse_tensor = loss_class.mse_loss(tf.constant(y_true,dtype=tf.float32),
tf.constant(y_pred,dtype=tf.float32))
self.assertEqual(np.sum(mse_tensor.numpy()),0)
# Make sure flipping other pairs does not return 0
y_pred[:,4] *= -1
mse_tensor = loss_class.mse_loss(tf.constant(y_true,dtype=tf.float32),
tf.constant(y_pred,dtype=tf.float32))
self.assertGreater(np.sum(mse_tensor.numpy()),0.1)
def test_log_gauss_diag(self):
# Will not be used for this test, but must be passed in.
flip_pairs = []
for num_params in range(1,20):
# Pick a random true, pred, and std and make sure it agrees with the
# scipy calculation
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
y_true = np.random.randn(num_params)
y_pred = np.random.randn(num_params)
std_pred = np.random.randn(num_params)
nlp_tensor = loss_class.log_gauss_diag(tf.constant(y_true),
tf.constant(y_pred),tf.constant(std_pred))
# Compare to scipy function to be exact. Add 2 pi offset.
scipy_nlp = -multivariate_normal.logpdf(y_true,y_pred,
np.diag(np.exp(std_pred))) - np.log(2 * np.pi) * num_params/2
self.assertAlmostEqual(nlp_tensor.numpy(),scipy_nlp)
def test_diagonal_covariance_loss(self):
# Test that the diagonal covariance loss gives the correct values
flip_pairs = [[1,2],[3,4],[1,2,3,4]]
num_params = 6
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
# Set up a couple of test function to make sure that the minimum loss
# is taken
y_true = np.ones((1,num_params))
y_pred = np.ones((1,num_params))
y_pred1 = np.ones((1,num_params)); y_pred1[:,[1,2]] = -1
y_pred2 = np.ones((1,num_params)); y_pred2[:,[3,4]] = -1
y_pred3 = np.ones((1,num_params)); y_pred3[:,[1,2,3,4]] = -1
y_preds = [y_pred,y_pred1,y_pred2,y_pred3]
std_pred = np.ones((1,num_params))
# The correct value of the nlp
scipy_nlp = -multivariate_normal.logpdf(y_true.flatten(),y_pred.flatten(),
np.diag(np.exp(std_pred.flatten()))) -np.log(2 * np.pi)*num_params/2
for yp in y_preds:
yptf = tf.constant(np.concatenate([yp,std_pred],axis=-1),
dtype=tf.float32)
yttf = tf.constant(y_true,dtype=tf.float32)
diag_loss = loss_class.diagonal_covariance_loss(yttf,yptf)
self.assertAlmostEqual(diag_loss.numpy(),scipy_nlp)
# Repeat this excercise, but introducing error in prediction
for yp in y_preds:
yp[:,0] = 10
scipy_nlp = -multivariate_normal.logpdf(y_true.flatten(),y_pred.flatten(),
np.diag(np.exp(std_pred.flatten()))) -np.log(2 * np.pi)*num_params/2
for yp in y_preds:
yptf = tf.constant(np.concatenate([yp,std_pred],axis=-1),
dtype=tf.float32)
yttf = tf.constant(y_true,dtype=tf.float32)
diag_loss = loss_class.diagonal_covariance_loss(yttf,yptf)
self.assertAlmostEqual(diag_loss.numpy(),scipy_nlp)
# Confirm that when the wrong pair is flipped, it does not
# return the same answer.
y_pred4 = np.ones((1,num_params))
y_pred4[:,[5,2]] = -1
y_pred4[:,0] = 10
yptf = tf.constant(np.concatenate([y_pred4,std_pred],axis=-1),
dtype=tf.float32)
yttf = tf.constant(y_true,dtype=tf.float32)
diag_loss = loss_class.diagonal_covariance_loss(yttf,yptf)
self.assertGreater(np.abs(diag_loss.numpy()-scipy_nlp),1)
# Make sure it is still consistent with the true nlp
scipy_nlp = -multivariate_normal.logpdf(y_true.flatten(),
y_pred4.flatten(),
np.diag(np.exp(std_pred.flatten()))) -np.log(2 * np.pi)*num_params/2
self.assertAlmostEqual(diag_loss.numpy(),scipy_nlp)
# Finally, confirm that batching works
yptf = tf.constant(np.concatenate(
[np.concatenate([y_pred,std_pred],axis=-1),
np.concatenate([y_pred1,std_pred],axis=-1)],axis=0),dtype=tf.float32)
self.assertEqual(yptf.shape,[2,12])
diag_loss = loss_class.diagonal_covariance_loss(yttf,yptf).numpy()
self.assertEqual(diag_loss.shape,(2,))
self.assertEqual(diag_loss[0],diag_loss[1])
def test_construct_precision_matrix(self):
# A couple of test cases to make sure that the generalized precision
# matrix code works as expected.
num_params = 4
flip_pairs = []
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
# Set up a fake l matrix with elements
l_mat_elements = np.array([[1,2,3,4,5,6,7,8,9,10]],dtype=float)
l_mat = np.array([[np.exp(1),0,0,0],[2,np.exp(3),0,0],[4,5,np.exp(6),0],
[7,8,9,np.exp(10)]])
prec_mat = np.matmul(l_mat,l_mat.T)
# Get the tf representation of the prec matrix
l_mat_elements_tf = tf.constant(l_mat_elements)
p_mat_tf, diag_tf, L_mat = loss_class.construct_precision_matrix(
l_mat_elements_tf)
# Make sure everything matches
np.testing.assert_almost_equal(p_mat_tf.numpy()[0],prec_mat,decimal=5)
diag_elements = np.array([1,3,6,10])
np.testing.assert_almost_equal(diag_tf.numpy()[0],diag_elements)
for pi, p_mat_np in enumerate(p_mat_tf.numpy()):
np.testing.assert_almost_equal(p_mat_np,np.dot(
L_mat.numpy()[pi],L_mat.numpy()[pi].T))
# Rinse and repeat for a different number of elements with batching
num_params = 3
flip_pairs = []
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
# Set up a fake l matrix with elements
l_mat_elements = np.array([[1,2,3,4,5,6],[1,2,3,4,5,6]],dtype=float)
l_mat = np.array([[np.exp(1),0,0],[2,np.exp(3),0],[4,5,np.exp(6)]])
prec_mat = np.matmul(l_mat,l_mat.T)
# Get the tf representation of the prec matrix
l_mat_elements_tf = tf.constant(l_mat_elements)
p_mat_tf, diag_tf, _ = loss_class.construct_precision_matrix(
l_mat_elements_tf)
# Make sure everything matches
for p_mat in p_mat_tf.numpy():
np.testing.assert_almost_equal(p_mat,prec_mat)
diag_elements = np.array([1,3,6])
for diag in diag_tf.numpy():
np.testing.assert_almost_equal(diag,diag_elements)
def test_log_gauss_full(self):
# Will not be used for this test, but must be passed in.
flip_pairs = []
for num_params in range(1,10):
# Pick a random true, pred, and std and make sure it agrees with the
# scipy calculation
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
y_true = np.random.randn(num_params)
y_pred = np.random.randn(num_params)
l_mat_elements_tf = tf.constant(
np.expand_dims(np.random.randn(int(num_params*(num_params+1)/2)),
axis=0),dtype=tf.float32)
p_mat_tf, L_diag, _ = loss_class.construct_precision_matrix(
l_mat_elements_tf)
p_mat = p_mat_tf.numpy()[0]
nlp_tensor = loss_class.log_gauss_full(tf.constant(np.expand_dims(
y_true,axis=0),dtype=float),tf.constant(np.expand_dims(
y_pred,axis=0),dtype=float),p_mat_tf,L_diag)
# Compare to scipy function to be exact. Add 2 pi offset.
scipy_nlp = (-multivariate_normal.logpdf(y_true,y_pred,np.linalg.inv(
p_mat)) - np.log(2 * np.pi) * num_params/2)
# The decimal error can be significant due to inverting the precision
# matrix
self.assertAlmostEqual(np.sum(nlp_tensor.numpy()),scipy_nlp,places=1)
def test_full_covariance_loss(self):
# Test that the diagonal covariance loss gives the correct values
flip_pairs = [[1,2],[3,4],[1,2,3,4]]
num_params = 6
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
# Set up a couple of test function to make sure that the minimum loss
# is taken
y_true = np.ones((1,num_params))
y_pred = np.ones((1,num_params))
y_pred1 = np.ones((1,num_params)); y_pred1[:,[1,2]] = -1
y_pred2 = np.ones((1,num_params)); y_pred2[:,[3,4]] = -1
y_pred3 = np.ones((1,num_params)); y_pred3[:,[1,2,3,4]] = -1
y_preds = [y_pred,y_pred1,y_pred2,y_pred3]
L_elements_len = int(num_params*(num_params+1)/2)
# Have to keep this matrix simple so that we still get a reasonable
# answer when we invert it for scipy check
L_elements = np.zeros((1,L_elements_len))+1e-2
# Get out the covariance matrix in numpy
l_mat_elements_tf = tf.constant(L_elements,dtype=tf.float32)
p_mat_tf, L_diag, _ = loss_class.construct_precision_matrix(
l_mat_elements_tf)
cov_mat = np.linalg.inv(p_mat_tf.numpy()[0])
# The correct value of the nlp
scipy_nlp = -multivariate_normal.logpdf(y_true.flatten(),y_pred.flatten(),
cov_mat) -np.log(2 * np.pi)*num_params/2
for yp in y_preds:
yptf = tf.constant(np.concatenate([yp,L_elements],axis=-1),
dtype=tf.float32)
yttf = tf.constant(y_true,dtype=tf.float32)
diag_loss = loss_class.full_covariance_loss(yttf,yptf)
self.assertAlmostEqual(np.sum(diag_loss.numpy()),scipy_nlp,places=4)
# Repeat this excercise, but introducing error in prediction
for yp in y_preds:
yp[:,0] = 10
scipy_nlp = -multivariate_normal.logpdf(y_true.flatten(),y_pred.flatten(),
cov_mat) -np.log(2 * np.pi)*num_params/2
for yp in y_preds:
yptf = tf.constant(np.concatenate([yp,L_elements],axis=-1),
dtype=tf.float32)
yttf = tf.constant(y_true,dtype=tf.float32)
diag_loss = loss_class.full_covariance_loss(yttf,yptf)
self.assertAlmostEqual(np.sum(diag_loss.numpy()),scipy_nlp,places=4)
# Confirm that when the wrong pair is flipped, it does not
# return the same answer.
y_pred4 = np.ones((1,num_params)); y_pred4[:,[5,2]] = -1
y_pred4[:,0] = 10
yptf = tf.constant(np.concatenate([y_pred4,L_elements],axis=-1),
dtype=tf.float32)
yttf = tf.constant(y_true,dtype=tf.float32)
diag_loss = loss_class.full_covariance_loss(yttf,yptf)
self.assertGreater(np.abs(diag_loss.numpy()-scipy_nlp),1)
# Make sure it is still consistent with the true nlp
scipy_nlp = -multivariate_normal.logpdf(y_true.flatten(),
y_pred4.flatten(),cov_mat) -np.log(2 * np.pi)*num_params/2
self.assertAlmostEqual(np.sum(diag_loss.numpy()),scipy_nlp,places=2)
# Finally, confirm that batching works
yptf = tf.constant(np.concatenate(
[np.concatenate([y_pred,L_elements],axis=-1),
np.concatenate([y_pred1,L_elements],axis=-1)],axis=0),
dtype=tf.float32)
self.assertEqual(yptf.shape,[2,27])
diag_loss = loss_class.full_covariance_loss(yttf,yptf).numpy()
self.assertEqual(diag_loss.shape,(2,))
self.assertEqual(diag_loss[0],diag_loss[1])
def test_log_gauss_gm_full(self):
# Will not be used for this test, but must be passed in.
flip_pairs = []
for num_params in range(1,10):
# Pick a random true, pred, and std and make sure it agrees with the
# scipy calculation
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
y_true = np.random.randn(num_params)
yttf=tf.constant(np.expand_dims(y_true,axis=0),dtype=float)
y_pred1 = np.random.randn(num_params)
yp1tf=tf.constant(np.expand_dims(y_pred1,axis=0),dtype=float)
y_pred2 = np.random.randn(num_params)
yp2tf=tf.constant(np.expand_dims(y_pred2,axis=0),dtype=float)
pi = np.random.rand()
pitf = tf.constant(np.array([[pi]]),dtype=float)
l_mat_elements_tf1 = tf.constant(
np.expand_dims(np.random.randn(int(num_params*(num_params+1)/2)),
axis=0),dtype=tf.float32)
l_mat_elements_tf2 = tf.constant(
np.expand_dims(np.random.randn(int(num_params*(num_params+1)/2)),
axis=0),dtype=tf.float32)
p_mat_tf1, L_diag1, _ = loss_class.construct_precision_matrix(
l_mat_elements_tf1)
p_mat_tf2, L_diag2, _ = loss_class.construct_precision_matrix(
l_mat_elements_tf2)
cov_mat1 = np.linalg.inv(p_mat_tf1.numpy()[0])
cov_mat2 = np.linalg.inv(p_mat_tf2.numpy()[0])
nlp_tensor = loss_class.log_gauss_gm_full(yttf,[yp1tf,yp2tf],
[p_mat_tf1,p_mat_tf2],[L_diag1,L_diag2],[pitf,1-pitf])
# Compare to scipy function to be exact. Add 2 pi offset.
scipy_nlp1 = (multivariate_normal.logpdf(y_true,y_pred1,cov_mat1)
+ np.log(2 * np.pi) * num_params/2 + np.log(pi))
scipy_nlp2 = (multivariate_normal.logpdf(y_true,y_pred2,cov_mat2)
+ np.log(2 * np.pi) * num_params/2 + np.log(1-pi))
scipy_nlp = -np.logaddexp(scipy_nlp1,scipy_nlp2)
# The decimal error can be significant due to inverting the precision
# matrix
self.assertAlmostEqual(np.sum(nlp_tensor.numpy()),scipy_nlp,places=1)
def test_gm_full_covariance_loss(self):
# Test that the diagonal covariance loss gives the correct values
flip_pairs = [[1,2],[3,4],[1,2,3,4]]
num_params = 6
loss_class = bnn_alexnet.LensingLossFunctions(flip_pairs,num_params)
# Set up a couple of test function to make sure that the minimum loss
# is taken
y_true = np.ones((1,num_params))
y_pred = np.ones((1,num_params))
y_pred1 = np.ones((1,num_params))
y_pred1[:,[1,2]] = -1
y_pred2 = np.ones((1,num_params))
y_pred2[:,[3,4]] = -1
y_pred3 = np.ones((1,num_params))
y_pred3[:,[1,2,3,4]] = -1
y_preds = [y_pred,y_pred1,y_pred2,y_pred3]
L_elements_len = int(num_params*(num_params+1)/2)
# Have to keep this matrix simple so that we still get a reasonable
# answer when we invert it for scipy check
L_elements = np.zeros((1,L_elements_len))
# Created on Jun. 28, 2018
# An implementation of the homonymous Matlab function.
import numpy as np
from .polystab import polystab
"""
Input variables:
h - Frequency response array
w - Normalized frequency array (from zero to pi)
nb - numerator order
na - denominator order
wt - weight array, same length as h
gauss - whether to use Gauss-Newton method, default False
real - whether real or complex filter, default True
maxiter - maximum number of iteration when using Gauss-Newton method, default 30
tol - tolerance when using Gauss-Newton method, default 0.01
"""
def invfreqz(h, w, nb, na, wt=None, gauss=False, real=True, maxiter=30, tol=0.01):
if len(h) != len(w):
raise ValueError('H and W should be of equal length.')
nb = nb + 1
nm = max(nb-1, na)
OM_a = np.mat(np.arange(0, nm+1))
OM_m = OM_a.T * np.mat(w)
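# Hedged usage sketch (assuming, as in Matlab's invfreqz, that the completed function
# returns numerator/denominator polynomial coefficients b and a):
#
#   w = np.linspace(0, np.pi, 128)       # normalized frequency grid
#   h = np.exp(-2j * w)                  # frequency response of a two-sample delay
#   b, a = invfreqz(h, w, nb=2, na=2)    # fit a 2nd-order rational model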
import numpy as np
def shannon(data, sigma=1.0):
"""Given data (squared differences of vectors), return the entropy and p_ij values for the data."""
# Compute P-row and corresponding perplexity
arg = -data/(2*sigma**2)
if (arg > 0).any():
raise ValueError('data (squared distances) must be non-negative')
if (arg > 710).any():
raise ValueError('overflow warning, sigma={0:.2g}'.format(sigma))
P = np.exp(arg)
sumP = P.sum(axis=0)
# H = -Sum_j p_jilogp_ji
# p_ji = P/sumP
# log p_ji = log P - log sumP
# H = Sum_j p_ji/sumP * (D_ji/2*sigma**2 + np.log(sumP))
# H = Sum_j (p_ji*D_ji/2*sigma**2))/sumP + p_ji/sumP*np.log(sumP)
# H = beta * Sum_j (p_ji*D_ji)/sumP + Sum_j p_ji/sumP *np.log(sumP)
# Sum_j p_ji = Sum_j p(j|i) = 1
# H = beta * meancondD + np.log(sumP)
H = np.log(sumP) + np.sum(data * P) / (2 * sigma**2 * sumP)  # beta = 1/(2*sigma**2), per the derivation above
if np.abs(H) == np.inf:
raise ValueError('Entropy is undefined')
# normalize the p_ij
P = P/sumP
return H, P
def binary_search(D_i, target, inv_sigma=1., inv_sigma_min=1.*10**-8,
inv_sigma_max=np.inf, tol=10**-3, max_iters=100):
"""Implement a binary search to find the ideal sigma_i."""
H, P_i = shannon(D_i, 1/inv_sigma)
# Evaluate whether the perplexity is within tolerance
delta = H - target
iterations = 0
prevH = 0
if type(tol) is not float:
raise ValueError('tolerance value must be a number')
while np.abs(delta) > tol:
if iterations > max_iters:
break
if delta > 0:
# if difference is positive, the minimum bound of sigma
# is the current sigma:
inv_sigma_min = inv_sigma
# if sigmamax is at a boundary point:
if inv_sigma_max == np.inf:
# increase the current sigma to twice its value
# (sigmamax is too high to average)
inv_sigma = inv_sigma_min * 2.
else:
# otherwise take the average of bounds
inv_sigma = (inv_sigma_min + inv_sigma_max)/2.
else:
inv_sigma_max = inv_sigma
inv_sigma = (inv_sigma_min + inv_sigma_max)/2.
# Update
H, P_i = shannon(D_i, 1/inv_sigma)
delta = H - target
iterations += 1
if prevH == H:
return P_i, 1/inv_sigma
prevH = H
if iterations == 50:
print('Error, non convergence')
return P_i, 1/inv_sigma
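# Hedged usage sketch: binary_search is meant to run once per data point, tuning
# sigma_i until the Shannon entropy of the conditional distribution matches the
# target. The names below (D as pairwise squared distances, log-perplexity target)
# are illustrative assumptions, not taken from this module.
#
#   target = np.log(30.0)                 # entropy corresponding to perplexity 30
#   P_i, sigma_i = binary_search(D[i], target)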
def sne(X):
"""
# calculate the dotproduct between each sample:
# calculate |x_j|^2 for each vector
"""
sum_X = np.sum(np.square(X), 1)
dotprod = -2 * np.dot(X, X.T)
# calculate
# |x_j|^2 - 2*|x_i||x_j|cosTheta = |x_j|^2 - 2*|x_i - x_j|^2
# this is asymmetric
Dprime = np.add(dotprod, sum_X)
# symmetrize by completing the square:
# |x_j|^2 - 2*|x_i - x_j|^2 + |x_i|
D = np.add(Dprime.T, sum_X)
# set D_ii = 0
D = D.astype(np.float)
D = np.maximum(D, 0)
np.fill_diagonal(D, 0)
return D
def tsne_Y(Y):
"""
# The code below changes between t-SNE and SNE.
# The matrix below results in (p_ij + p_ji)/2 after exp and normalization
# calculate the dotproduct between each sample:
# calculate |x_j|^2 for each vector
"""
sum_Y = np.sum(np.square(Y), 1)
dotprod = -2 * np.dot(Y, Y.T)
# calculate
# |x_j|^2 - 2*|x_i||x_j|cosTheta = |x_j|^2 - 2*|x_i - x_j|^2
# this is asymmetric
Dprime = np.add(dotprod, sum_Y)
# symmetrize by completing the square:
# |x_j|^2 - 2*|x_i - x_j|^2 + |x_i|
D = np.add(Dprime.T, sum_Y)
# student t with 1df
numerator = 1/(1 + D)
Q = numerator/numerator.sum(axis=0)
# underflow
Q = np.maximum(Q, 10**-12)
np.fill_diagonal(Q, 0)
np.fill_diagonal(numerator, 0)
return Q, numerator
def run_SNE(X=np.array([]), no_dims=2, perplexity=30.0, reduce_dims=0, max_iter=1000, learning_rate=1., SNE=True, min_gain=0.1):
"""Run t-sne on dataset."""
# if desired, PCA reduce the data
if reduce_dims != 0:
X, _, _ = pca(X, reduce_dims)
print(X.max())
print(X.sum(axis=1).max())
# initialize variables
n, d = X.shape
min_gain = 0.01 # minimum gain
initial_momentum = 0.5
final_momentum = 0.8
# initialize Y matrix:
Y = np.random.randn(n, no_dims) # Y shaped as samples(n) and no_dims (50)
# initialize gradient wrt Y matrix
gradY = np.zeros((n, no_dims)) # deltaY
diffY = np.zeros((n, no_dims)) # for gradient computations
iY = np.zeros((n, no_dims)) # for gradient computations
gains = np.ones((n, no_dims)) # no clue
KL = np.zeros(max_iter)
# Compute P-values
P = find_perplexity(X, perplexity=perplexity, tol=1.*10**-3)
if SNE == False:
# symmetrize by adding p_ij + p_ji
P = P + np.transpose(P)
# normalization will take care of the
# extra factor of 2
P = P/P.sum(axis=1)
# make sure off-diagonals are not zero
# underflow is a real problem here
P = np.maximum(P, 10**-20)
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import ptitprince as pt
import meld_classifier.paths as paths
from meld_classifier.meld_cohort import MeldCohort
import os
import glob
import pandas as pd
from statsmodels.formula.api import ols
import statsmodels.api as sm
import scipy.stats as stats
import json
from textwrap import wrap
class ExperimentComparison:
def __init__(
self,
experiments_dictionary,
experiment_path=paths.EXPERIMENT_PATH,
folds=range(10),
threshold="optimal",
params_for_experiment_name=None,
restrict_subjects=None,
):
"""
Class for comparing experiments. Calculates experiment statistics and generates plots summarising comparisons
Args:
threshold (string): "optimal" or "0.5"
params_for_experiment_name: optional dict containing data and network parameters that should be included
in the experiment name on the plots (useful when comparing experiments that vary two parameters)
restrict_subjects (optional, string): "FLAIR" "noFLAIR", restrict subjects to compare to a subset of the original test subjects.
Either use only subjects with FLAIR features, or only subjects without FLAIR features
"""
self.threshold = threshold
self.experiments_dictionary = experiments_dictionary
self.experiment_path = experiment_path
self.experiment_folders = list(experiments_dictionary.keys())
self.params_for_experiment_name = params_for_experiment_name
self.folds = folds
self.restrict_subjects = restrict_subjects
self.folds_df, self.fold_statistics = self.load_fold_results()
self.patients_df, self.controls_df = self.load_subject_results()
# --- data loading functions ---
def load_subject_results(self):
patient_stats = []
control_stats = []
patient_ids = []
control_ids = []
sub_column_p = []
sub_column_c = []
fold_column = []
for folder in self.experiment_folders:
for param in self.experiments_dictionary[folder]:
for fold in self.folds:
# get the name by which the experiment should be represented
exp_name = self._get_experiment_name(
folder,
param,
fold=fold,
use_params=self.params_for_experiment_name is not None,
params=self.params_for_experiment_name,
)
experiment_variable = os.path.basename(folder)[:-9]
# load per-subject results
fold_dict = self._load_json(
os.path.join(
self.experiment_path,
folder,
"fold_{}".format(fold),
"results",
"per_subject_{}_{}_{}.json".format(experiment_variable, param, self.threshold),
)
)
# get data parameters (needed to know how to filter subjects)
data_parameters = json.load(
open(
os.path.join(
self.experiment_path,
folder,
"fold_{}".format(fold),
"data_parameters_{}_{}.json".format(experiment_variable, param),
)
)
)
subject_ids = self.filter_subjects(
list(fold_dict["patients"].keys()), hdf5_file_root=data_parameters["hdf5_file_root"]
)
for subject in sorted(subject_ids):
patient_stats.append(fold_dict["patients"][subject])
patient_ids.append(subject)
sub_column_p.append(exp_name)
fold_column.append(fold)
subject_ids = self.filter_subjects(
list(fold_dict["controls"].keys()), hdf5_file_root=data_parameters["hdf5_file_root"]
)
for subject in sorted(subject_ids):
control_stats.append(fold_dict["controls"][subject])
control_ids.append(subject)
sub_column_c.append(exp_name)
patients_df = pd.DataFrame(patient_stats)
patients_df["subexperiment"] = sub_column_p
patients_df["subj_id"] = patient_ids
patients_df = patients_df.rename(columns={0: "detected", 1: "n_clusters", 2: "dice_index"})
patients_df["dice_index"] = np.log(patients_df["dice_index"] + 0.01)
patients_df["specificity"] = patients_df["n_clusters"] == 0
patients_df["n_clusters"] = np.log(patients_df["n_clusters"] + 0.5)
patients_df["fold"] = fold_column
controls_df = pd.DataFrame(control_stats)
controls_df["subexperiment"] = sub_column_c
controls_df["subj_id"] = control_ids
controls_df = controls_df.rename(columns={0: "any_clusters", 1: "n_clusters"})
controls_df["specificity"] = controls_df["n_clusters"] == 0
controls_df["n_clusters"] = np.log(controls_df["n_clusters"] + 0.5)
return patients_df, controls_df
def filter_subjects(self, subject_ids, hdf5_file_root="{site_code}_{group}_featuremetrix.hdf5"):
"""filter subjects to FLAIR or no FLAIR, depending on self.restrict_subjects.
Note: this is independent of the features that the model was actually trained on.
It looks in the hdf5 and thus filters on general availability of FLAIR or not
"""
if self.restrict_subjects is None:
return subject_ids
else:
cohort = MeldCohort(hdf5_file_root=hdf5_file_root)
# get all FLAIR subjects
all_flair_subject_ids = cohort.get_subject_ids(subject_features_to_exclude=["FLAIR"])
# restrict subjects to those that have flair features
flair_subject_ids = [subj_id for subj_id in subject_ids if subj_id in all_flair_subject_ids]
if self.restrict_subjects == "FLAIR":
print("using {} of {} subjects".format(len(flair_subject_ids), len(subject_ids)))
return flair_subject_ids
elif self.restrict_subjects == "noFLAIR":
# return difference between all subjects and flair subjects (resulting in those that dont have flair)
noflair_subject_ids = list(np.setdiff1d(subject_ids, flair_subject_ids))
print("using {} of {} subjects".format(len(noflair_subject_ids), len(subject_ids)))
return noflair_subject_ids
else:
raise NotImplementedError(self.restrict_subjects)
def load_fold_results(self):
folds_column = []
fold_stats = []
sub_column = []
for fold in self.folds:
for folder in self.experiment_folders:
for param in self.experiments_dictionary[folder]:
# extract variable name omitting the date
experiment_variable = os.path.basename(folder)[:-9]
exp_name = self._get_experiment_name(
folder,
param,
fold=fold,
use_params=self.params_for_experiment_name is not None,
params=self.params_for_experiment_name,
)
stats_df = pd.read_csv(
os.path.join(
self.experiment_path,
folder,
"fold_{}".format(fold),
"results",
"test_results_{}_{}.csv".format(experiment_variable, param),
)
)
folds_column.append(fold)
sub_column.append(exp_name)
if self.threshold == "optimal":
fold_stats.append(stats_df.loc[1])
elif self.threshold == "0.5":
fold_stats.append(stats_df.loc[0])
# get names of statistics from one of the dataframes
fold_statistics = list(stats_df.columns)
# format into nice table
folds_df = pd.DataFrame(fold_stats)
folds_df["fold"] = folds_column
folds_df["subexperiment"] = sub_column
return folds_df, fold_statistics
def _load_json(self, json_file):
with open(json_file, "r") as f:
results_dict = json.load(f)
return results_dict
def _get_experiment_name(self, experiment_folder, param_value, fold=0, use_params=False, params={}):
exp_name = os.path.basename(experiment_folder)[:-9]
if use_params is False:
# take original experiment name consisting of parameter to vary + parameter value
# remove date from experiment_folder (9 characters)
name = "{}_{}".format(exp_name, param_value)
else:
# use format: parameter1_value1-parameter2_value2...
exp_path = os.path.join(self.experiment_path, experiment_folder, "fold_{}".format(fold))
data_params = self._load_json(
os.path.join(exp_path, "data_parameters_{}_{}.json".format(exp_name, param_value))
)
network_params = self._load_json(
os.path.join(exp_path, "network_parameters_{}_{}.json".format(exp_name, param_value))
)
name = []
for p in params.get("data_parameters", []):
name.append("{}_{}".format(p, data_params[p]))
for p in params.get("network_parameters", []):
name.append("{}_{}".format(p, network_params[p]))
name = "-".join(name)
return name
# --- calculate comparison functions ---
def calculate_per_patient_ranks(self, stats_of_interest, subexperiments):
dataframe = self.patients_df
print(stats_of_interest, subexperiments)
df1 = dataframe[["subj_id", "fold", stats_of_interest[0], stats_of_interest[1]]][
dataframe["subexperiment"] == subexperiments[0]
]
df2 = dataframe[["subj_id", "fold", stats_of_interest[0], stats_of_interest[1]]][
dataframe["subexperiment"] == subexperiments[1]
]
df1 = df1.reset_index(drop=True)
df2 = df2.reset_index(drop=True)
diff_df = df1.copy()
diff_df[stats_of_interest[0]] = df1[stats_of_interest[0]] - df2[stats_of_interest[0]]
diff_df[stats_of_interest[1]] = df1[stats_of_interest[1]] - df2[stats_of_interest[1]]
sorted = diff_df.sort_values(by=["dice_index"])
sorted.to_csv(
os.path.join(
self.experiment_path,
self.experiment_folders[0],
"per_patient_differences_{}-{}.csv".format(subexperiments[0], subexperiments[1]),
),
index=False,
)
return
def anova(self, dataframe, statistic):
"""test independence of different experiments"""
mod = ols('Q("{}") ~ Q("{}")'.format(statistic, "subexperiment"), data=dataframe).fit()
try:
aov_table = sm.stats.anova_lm(mod, typ=2)
except ValueError:
aov_table = sm.stats.anova_lm(mod, typ=1)
stat_ = np.array(aov_table)[0, 2]
p = np.array(aov_table)
#!/usr/bin/env python
# coding: utf-8
"""
Utilities to generate channels defined by their Kraus operators,
and convert them to Choi state matrix if needed.
Output format is systematically an array of shape (r, d, d),
where r is the rank of the channel, d is the dimension of the underlying
Hilbert space and each (d, d)-array is a Kraus operator.
Unless specified, 'channel' will refer to this form in the end of this
description.
Main functions are:
* QFTKraus: Generates the channel of the quantum Fourier transform.
* KrausOp: Makes a convex combination of unitary channels. If
no channel is provided, unitary basis is assumed. Used for generating sums
of random unitary channels.
* add_disentanglement_noise: Takes a channel $C$ acting on qubits and returns
a noisy version of it: after $C$, there is a chance that a projection is
applied on the first qubit. Similar effect if used on a channel not acting on qubits.
* Choi: Generates the Choi matrix of a channel.
"""
import numpy as N
import scipy.linalg as SL
import scipy.stats as SS
def sylvester(d):
"""
Sylvester unitary matrix.
"""
syl = N.diagflat(N.ones(d-1), -1)
syl[0, -1] = 1
return syl
def clock(d):
"""
Clock unitary matrix.
"""
roots_unity = N.e**(N.arange(d) * 2 * N.pi * 1j / d)
return N.diagflat(roots_unity)
def basis_unitary(d):
"""
Yields an orthogonal basis of the set unitary matrices U(d).
Output array is (d, d, d).
First dimension is the index of the unitary in the basis.
The unitary with index $di + j$ is $C^i \cdot S^j$, where
C is the clock matrix and S is the Sylvester matrix.
"""
clocks = clock(d)
clock_stack = N.eye(d, dtype=complex).reshape(1, d, d) * N.ones((d, 1, 1))
for j in range(1, d):
clock_stack[j,:,:] = clock_stack[j-1,:,:] @ clocks
syl = sylvester(d)
syl_stack = N.eye(d, dtype=complex).reshape(1, d, d) * N.ones((d, 1, 1))
for j in range(1, d):
syl_stack[j,:,:] = syl_stack[j-1,:,:] @ syl
basis = N.zeros((d**2, d, d), dtype=complex)
for i in range(d):
for j in range(d):
basis[i + j * d,:,:] = clock_stack[i,:,:] @ syl_stack[j,:,:]
return basis
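# Illustrative sanity check (not part of the original module): the d**2 clock/Sylvester
# products form an orthogonal basis of d x d matrices under the Hilbert-Schmidt inner
# product, so tr(B_i^dag B_j) = d * delta_ij.
#
#   B = basis_unitary(3)
#   gram = N.einsum('aij,bij->ab', B.conj(), B)
#   assert N.allclose(gram, 3 * N.eye(9))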
def sub_basis(d, indices_list):
"""
Generates the elements of indices given in indices_list of the orthogonal
basis of unitary matrices given by: The unitary with
index $di + j$ is $C^i \cdot S^j$, where
C is the clock matrix and S is the Sylvester matrix.
Output array is (len(indices_list), d, d).
"""
cl = clock(d)
syl = sylvester(d)
return N.array([N.linalg.matrix_power(cl, i) @ N.linalg.matrix_power(syl,j) for (i,j) in indices_list])
def rand_unitary(dim):
"""
Generates a uniformly random unitary channel.
"""
z = 1/N.sqrt(2)*(SS.norm.rvs(size=(dim,dim)) + 1j*SS.norm.rvs(size=(dim,dim)))
q, r = SL.qr(z)
d = r.diagonal()
q *= d/N.abs(d)
return q
def convex_combi_channels(d, weights, channels):
"""
Makes a convex combination channels.
Input:
* d is the dimension of the underlying Hilbert space
* weights is an array-like with the weights of each channel. They
must sum to one, and be non-negative.
* channels: list of channels
"""
weights = N.asarray(weights)
assert N.isclose(weights.sum(), 1), "Not trace-preserving; \sum w_c[0] must equal 1."
coeffs = N.sqrt(weights)
Kraus = N.concatenate([coeff * channel for (coeff, channel) \
in zip(coeffs, channels)])
return Kraus
def KrausOp(d, weights, indices, us=None):
"""
Convex combination of unitary channels.
Write r for the rank of the operator.
Input:
* d is the dimension of the underlying Hilbert space
* weights is an array-like with the weights of each channel. They
must sum to one, and be non-negative.
* indices are which r unitary operators in us are chosen.
* If the list us is None, then it is assumed to be the output basis of
the function basis_unitary(d).
"""
weights = N.asarray(weights)
indices = N.asarray(indices)
if us is None:
us = basis_unitary(d)
assert N.isclose(weights.sum(), 1), "Not trace-preserving; \sum w_c[0] must equal 1."
coeffs = N.sqrt(weights)
Kraus = coeffs.reshape(-1, 1, 1) * us[indices, :, :]
return Kraus
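# Hedged usage sketch: a rank-3 mixed-unitary qubit channel built from the first three
# basis unitaries; the weights are illustrative. Trace preservation follows because each
# Kraus operator is a scaled unitary.
#
#   kraus = KrausOp(2, weights=[0.8, 0.1, 0.1], indices=[0, 1, 2])
#   completeness = sum(k.conj().T @ k for k in kraus)
#   assert N.allclose(completeness, N.eye(2))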
def add_disentanglement_noise(channel, level):
"""
Adds the following noise to a channel: with probability level, a measurement
is applied in the natural basis to the first qubit, discarding the
result. This corresponds to adding two Kraus operators to each Kraus
operator $K$, namely $P_+ K$ and $P_- K$, where $P_+$ is the projection on
the subspace spanned by the first half of basis vectors, and $P_-$ the
projection on the subspace spanned by the other half.
INPUT
channel: (r, d, d)-array of Kraus operators of the channel.
level: Probability of applying the disentanglement. Between 0 and 1.
OUTPUT
In general, (2r, d, d)-array of Kraus operators.
First r operators are the scaled original ones.
Last r operators are the difference between those corresponding to projecting
on the first half of basis vectors (as, measurement of the first qubit yielded +).
and those corresponding to projecting on the second half
of basis vectors (as, measurement of the first qubit yielded -).
Indeed the a priori rank (3r) channel is at most (2r).
If the underlying space's dimension is odd, the second half has one more
dimension.
Exception:
* If level=0, original (r, d, d)-array of Kraus operators.
"""
if level == 0:
return channel
r, d = channel.shape[:2]
half_d = d // 2
P_plus = N.diag(N.arange(d) < half_d) * N.sqrt(level/2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 13:38:02 2019
@author: brsr
"""
import geopandas
import pandas as pd
import shapely
from shapely.geometry import LineString, Polygon, Point
import pyproj
#import homography
import warnings
import numpy as np
from abc import ABC
from scipy.optimize import minimize, minimize_scalar, root_scalar
from scipy.special import hyp2f1, gamma, ellipj, ellipk, ellipkinc
#TODO:
#vectorize all the things
#find a better implementation of conformal
# (some kind of circle-packing thing?)
#repeated subdivision
#arange3 = np.arange(3)
#FIRST AXIS IS SPATIAL
TGTPTS3 = np.eye(3)
TGTPTS4 = np.array([[0, 1, 1, 0],
[0, 0, 1, 1]])
def normalize(vectors, axis=0):
"""Normalizes vectors in n-space. The zero vector remains the zero vector.
Args:
vectors: Array of vectors
axis: Which axis to take the norm over (by default the first axis, 0)
>>> x = np.stack((np.ones(5), np.arange(5)), axis=0)
>>> normalize(x)
array([[1. , 0.70710678, 0.4472136 , 0.31622777, 0.24253563],
[0. , 0.70710678, 0.89442719, 0.9486833 , 0.9701425 ]])
"""
n = np.linalg.norm(vectors, axis=axis, keepdims=True)
return np.where(n <= 0, 0, vectors / n)
def complex_to_float2d(arr):
"""Converts a complex array to a multidimensional float array.
>>> x = np.exp(2j*np.pi*np.linspace(0, 1, 5)).round()
>>> complex_to_float2d(x.round())
array([[ 1., 0.],
[ 0., 1.],
[-1., 0.],
[-0., -1.],
[ 1., -0.]])
"""
return arr.view(float).reshape(list(arr.shape) + [-1])
def float2d_to_complex(arr):
"""Converts a multidimensional float array to a complex array.
Input must be a float type, since there is no integer complex type.
>>> y = np.arange(8, dtype=float).reshape((-1, 2))
>>> float2d_to_complex(y)
array([[0.+1.j],
[2.+3.j],
[4.+5.j],
[6.+7.j]])
"""
return arr.view(complex)
def sqrt(x):
"""Real sqrt clipped to 0 for negative values.
>>> x = np.array([-np.inf, -1, 0, 1, np.inf, np.nan])
>>> sqrt(x)
array([ 0., 0., 0., 1., inf, nan])
"""
return np.where(x < 0, 0, np.sqrt(x))
def geodesics(lon, lat, geod, n=100, includepts=False):
"""Draw geodesics between each adjacent pair of points given by
lon and lat.
"""
lon2 = np.roll(lon, -1, axis=0)
lat2 = np.roll(lat, -1, axis=0)
result = []
for l, t, l2, t2 in zip(lon, lat, lon2, lat2):
g = geod.npts(l, t, l2, t2, n)
g.insert(0, (l, t))
g.append((l2, t2))
result.append(LineString(g))
ctrlboundary = geopandas.GeoSeries(result)
if includepts:
controlpts = arraytoptseries(np.array([lon, lat]))
ctrlpoly = geopandas.GeoSeries(pd.concat([ctrlboundary, controlpts],
ignore_index=True))
return ctrlpoly
else:
return ctrlboundary
def transform_antipode(lon, lat):
"""Transform a point given by lon and lat to its antipode."""
lon2 = lon - 180
lon2 = np.where(lon2 <= -180, lon2 + 360, lon2)
return lon2, -lat
def ptseriestoarray(ser):
"""Convert a geopandas GeoSeries containing shapely Points
(or LineStrings of all the same length) to an array of
shape (2, n) or (3, n).
"""
return np.stack([x.coords for x in ser], axis=-1).squeeze()
def arraytoptseries(arr, crs={'epsg': '4326'}):
"""Convert an array of shape (2, ...) or (3, ...) to a
geopandas GeoSeries containing shapely Point objects.
"""
if arr.shape[0] == 2:
result = geopandas.GeoSeries([Point(x[0], x[1])
for x in arr.reshape(2, -1).T])
else:
result = geopandas.GeoSeries([Point(x[0], x[1], x[2])
for x in arr.reshape(3, -1).T])
#result.crs = crs
return result
def transeach(func, geoms):
"""Transform each element of geoms using the function func."""
plist = []
for geom in geoms:
if isinstance(geom, Point):
#special logic for points
ll = geom.coords[0]
plist.append(Point(func(*ll)))
else:
plist.append(shapely.ops.transform(func, geom))
return geopandas.GeoSeries(plist)
def graticule(spacing1=15, spacing2=1,
lonrange = [-180, 180], latrange = [-90, 90]):
"""
Create a graticule (or another square grid)
"""
a = int((lonrange[1] - lonrange[0])//spacing2)
b = int((latrange[1] - latrange[0])//spacing1)
c = int((lonrange[1] - lonrange[0])//spacing1)
d = int((latrange[1] - latrange[0])//spacing2)
plx = np.linspace(lonrange[0], lonrange[1], num=a + 1)
ply = np.linspace(latrange[0], latrange[1], num=b + 1)
mex = np.linspace(lonrange[0], lonrange[1], num=c + 1)
mey = np.linspace(latrange[0], latrange[1], num=d + 1)
parallels = np.stack(np.meshgrid(plx, ply), axis=-1).transpose((1,0,2))
meridians = np.stack(np.meshgrid(mex, mey), axis=-1)
gratlist = [parallels[:, i] for i in range(parallels.shape[1])]
gratlist += [meridians[:, i] for i in range(meridians.shape[1])]
gratl2 = [LineString(line) for line in gratlist]
grat = geopandas.GeoSeries(gratl2)
grat.crs = {'init': 'epsg:4326'}
return grat
#%%
def trigivenangles(angles, scale=np.pi/180):
"""Given angles, create the vertices of a triangle with those vertex
angles. Only uses the first 2 angles. The last vertex is always 1, 0.
>>> angles = np.array([45,90,45])
>>> np.round(trigivenangles(angles), decimals=8)
array([[-1., 0., 1.],
[ 0., -1., 0.]])
"""
angles = angles * scale
p0 = [np.cos(2*angles[1]), np.sin(2*angles[1])]
p1 = [np.cos(2*angles[0]), np.sin(-2*angles[0])]
p2 = [1, 0]
return np.array([p0, p1, p2]).T
def anglesgivensides(sides, scale=180/np.pi):
"""Given side lengths of a triangle, determines the interior angle at each
vertex, and the radius of the circumcircle.
>>> sides=np.array( [3,4,5])
>>> anglesgivensides(sides)
"""
#might be more stable to use law of cotangents, but eh
r = np.product(sides)/sqrt(
2*np.sum(sides**2*np.roll(sides,1)**2)
-np.sum(sides**4))
s1 = sides
s2 = np.roll(sides, -1)
s3 = np.roll(sides, 1)
cosangle = (s2**2 + s3**2 - s1**2)/ (2*s2*s3)
angles = np.arccos(cosangle)
return angles*scale, r
def trigivenlengths(sides):
"""Given side lengths, creates the vertices of a triangle with those
side lengths, and having circumcenter at 0,0.
>>> sides=np.array( [3,4,5])
>>> np.round(trigivenlengths(sides), decimals=8)
array([[-2.5, -0.7, 2.5],
[ 0. , -2.4, 0. ]])
"""
angles, r = anglesgivensides(sides, scale=1)
return r*trigivenangles(np.roll(angles, -1), scale=1)
#%%
def central_angle(x, y, signed=False):
"""Central angle between vectors with respect to 0. If vectors have norm
1, this is the spherical distance between them.
Args:
x, y: Coordinates of points on the sphere.
axis: Which axis the vectors lie along. By default, -1.
Returns: Array of central angles.
>>> t = np.linspace(0, np.pi, 5)
>>> c = np.cos(t)
>>> s = np.sin(t)
>>> z = np.zeros(t.shape)
>>> x = np.stack((c, s, z), axis=0)
>>> y = np.stack((c, z, s), axis=0)
>>> np.round(central_angle(x, y)/np.pi*180)
array([ 0., 60., 90., 60., 0.])
"""
cos = np.sum(x*y, axis=0)
sin = np.linalg.norm(np.cross(x, y, axis=0), axis=0)
result = np.arctan2(sin, cos)
return result if signed else abs(result)
def slerp(pt1, pt2, intervals):
"""Spherical linear interpolation.
Args:
pt1: Array of points. When interval is 0, the result is pt1.
pt2: Array of points. When interval is 1, the result is pt2.
intervals: Array of intervals at which to evaluate the
linear interpolation
>>> x = np.array([1, 0, 0])
>>> y = np.array([0, 0, 1])
>>> t = np.linspace(0, 1, 4)[:, np.newaxis]
>>> slerp(x, y, t)
array([[1. , 0. , 0. ],
[0.8660254, 0. , 0.5 ],
[0.5 , 0. , 0.8660254],
[0. , 0. , 1. ]])
"""
t = intervals
angle = central_angle(pt1, pt2)[..., np.newaxis]
return (np.sin((1 - t)*angle)*pt1 + np.sin((t)*angle)*pt2)/np.sin(angle)
def dslerp(pt1, pt2, intervals):
"""The derivative of slerp."""
t = intervals
angle = central_angle(pt1, pt2)[..., np.newaxis]
return (-np.cos((1 - t)*angle)*pt1 + np.cos(t*angle)*pt2)/np.sin(angle)
def triangle_solid_angle(a, b, c, axis=0):
"""Solid angle of a triangle with respect to 0. If vectors have norm 1,
this is the spherical area. Note there are two solid angles defined by
three points, determined by orientation of a, b, c.
Formula is from <NAME>; <NAME> (1983).
"The Solid Angle of a Plane Triangle". IEEE Trans. Biom. Eng.
BME-30 (2): 125–126. doi:10.1109/TBME.1983.325207.
Args:
a, b, c: Coordinates of points on the sphere.
Returns: Array of solid angles.
>>> t = np.linspace(0, np.pi, 5)
>>> a = np.stack([np.cos(t), np.sin(t), np.zeros(5)],axis=0)
>>> b = np.array([0, 1, 1])/np.sqrt(2)
>>> c = np.array([0, -1, 1])/np.sqrt(2)
>>> np.round(triangle_solid_angle(a, b, c), 4)
array([ 1.5708, 1.231 , 0. , -1.231 , -1.5708])
"""
axes = (axis,axis)
top = np.tensordot(a, np.cross(b, c, axis=axis), axes=axes)
na = np.linalg.norm(a, axis=0)
nb = np.linalg.norm(b, axis=0)
nc = np.linalg.norm(c, axis=0)
bottom = (na * nb * nc + np.tensordot(a, b, axes=axes) * nc
+ np.tensordot(b, c, axes=axes) * na
+ np.tensordot(c, a, axes=axes) * nb)
return 2 * np.arctan2(top, bottom)
def shoelace(pts):
"""Find area of polygon in the plane defined by pts, where pts is an
array with shape (2,n).
>>> pts = np.arange(6).reshape(2,-1)%4
>>> shoelace(pts)
2.0
"""
return abs(np.sum(np.cross(pts, np.roll(pts, -1, axis=1), axis=0)))/2
def antipode_v(ll):
"""Antipodes of points given by longitude and latitude."""
antipode = ll.copy()
antipode[0] -= 180
index = antipode[0] < -180
antipode[0, index] += 360
antipode[1] *= -1
return antipode
def omegascale(adegpts, degpts_t, geod, spacing=1):
"""Estimate scale factor and max deformation angle for a map projection
based on a grid of points
"""
#actrlpts, tgtpts,
#ar, p = geod.polygon_area_perimeter(actrlpts[0], actrlpts[1])
#at = shoelace(tgtpts)
es = geod.es
a = geod.a
factor = np.pi/180
#lon = adegpts[0]*factor
lat = adegpts[1]*factor
x = degpts_t[0]
y = degpts_t[1]
dx = np.gradient(x, factor*spacing)
dy = np.gradient(y, factor*spacing)
dxdlat, dxdlon = dx
dydlat, dydlon = dy
J = (dydlat*dxdlon - dxdlat*dydlon)
R = a*np.sqrt(1-es)/(1-es*np.sin(lat)**2)
h = sqrt((dxdlat)**2 + (dydlat)**2)*(1-es*np.sin(lat)**2)**(3/2)/(a*(1-es))
k = sqrt((dxdlon)**2 + (dydlon)**2)*(1-es*np.sin(lat)**2)**(1/2)/(a*np.cos(lat))
scale = J/(R**2*np.cos(lat))
sinthetaprime = np.clip(scale/(h*k), -1, 1)
aprime = sqrt(h**2 + k**2 + 2*h*k*sinthetaprime)
bprime = sqrt(h**2 + k**2 - 2*h*k*sinthetaprime)
sinomegav2 = np.clip(bprime/aprime, -1, 1)
omega = 360*np.arcsin(sinomegav2)/np.pi
return omega, scale
def rodrigues(center, v, theta):
"""Rodrigues formula: rotate vector v around center by angle theta
"""
cxv = np.cross(center, v)
cv = np.sum(center* v, axis=-1, keepdims=True)
cc = v*np.cos(theta) + cxv*np.sin(theta) + center*cv*(1-np.cos(theta))
return cc
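# Quick sanity check for the Rodrigues rotation above (illustrative only,
# not part of the original module): rotating the x unit vector by 90 degrees
# about the z axis should give (approximately) the y unit vector.
def _example_rodrigues():
    center = np.array([0, 0, 1])
    v = np.array([1, 0, 0])
    return rodrigues(center, v, np.pi/2)  # ~ [0, 1, 0]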
#%%
class Projection(ABC):
"""Don't subclass this without subclassing one of
transform and transform_v and one of invtransform and invtransform_v,
or else an infinite regression will occur"""
def transform(self, x, y, z = None, **kwargs):
if z is None:
pts = np.stack([x,y])
else:
pts = np.stack([x,y,z])
vresult = self.transform_v(pts, **kwargs)
return vresult
def invtransform(self, x, y, z=None, **kwargs):
if z is None:
pts = np.stack([x,y])
else:
pts = np.stack([x,y,z])
vresult = self.invtransform_v(pts, **kwargs)
return vresult
def transform_v(self, pts, **kwargs):
rpts = pts.reshape((pts.shape[0],-1)).T
result = []
for xy in rpts:
result.append(self.transform(*xy, **kwargs))
result = np.array(result)
shape = [-1, ] + list(pts.shape[1:])
return result.T.reshape(shape)
def invtransform_v(self, pts, **kwargs):
rpts = pts.reshape((pts.shape[0],-1)).T
result = []
for xy in rpts:
result.append(self.invtransform(*xy, **kwargs))
result = np.array(result)
shape = [-1, ] + list(pts.shape[1:])
return result.T.reshape(shape)
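# Minimal illustration of the subclassing contract described in the Projection
# docstring (this toy class is not part of the original module): override the
# scalar transform/invtransform and let the base class supply the vectorized
# transform_v/invtransform_v, so no infinite recursion occurs.
class _ScaleProjection(Projection):
    def __init__(self, factor=2.0):
        self.factor = factor
    def transform(self, x, y, z=None, **kwargs):
        return x * self.factor, y * self.factor
    def invtransform(self, x, y, z=None, **kwargs):
        return x / self.factor, y / self.factor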
#%%
class UV(Projection):
nctrlpts = 4
@staticmethod
def grid(**kwargs):
"""Create a square grid"""
return graticule(spacing1=1, spacing2=0.01,
lonrange=[0,1], latrange=[0,1])
@staticmethod
def gridpolys(n=11):
poi = np.array(np.meshgrid(np.linspace(0, 1, n),
np.linspace(0, 1, n)))
poilist = []
for i, j in np.ndindex(n-1,n-1):
x = Polygon([poi[:, i, j], poi[:, i, j+1],
poi[:, i+1, j+1], poi[:, i+1, j]])
poilist.append(x)
poiframe = geopandas.geoseries.GeoSeries(poilist)
return poiframe
@staticmethod
def segment(uv):
u, v = uv
index1 = u > v
index2 = u < 1 - v
#1 and 2 = 0
#1 and not 2 = 1
#not 1 and not 2 = 2
#not 1 and 2 = 3
result = np.zeros(u.shape)
result[index1 & ~index2] = 1
result[~index1 & ~index2] = 2
result[~index1 & index2] = 3
return result
class Bilinear(UV):
"""Bilinear interpolation
"""
_bilinear_mat = np.array([[ 1, 1, 1, 1],
[-1, 1, 1,-1],
[-1,-1, 1, 1],
[ 1,-1, 1,-1]])/4
def __init__(self, tgtpts):
self.tgtpts = tgtpts
self.abcd = self._bilinear_mat @ tgtpts.T
def transform(self, u, v):
"""u and v should have the same shape"""
abcd = self.abcd
stack = np.stack([np.ones(u.shape), u, v, u*v])
return (abcd @ stack).T
def transform_v(self, pts, **kwargs):
return self.transform(pts[0], pts[1])
def invtransform_v(self, pts):
abcd = self.abcd
A = abcd[:,0]
B = abcd[:,1]
C = abcd[:,2]
D = abcd[:,3] - pts
AB = np.cross(A,B)
AC = np.cross(A,C)
AD = np.cross(A,D)
BC = np.cross(B,C)
BD = np.cross(B,D)
CD = np.cross(C,D)
ua = 2*BD
ub = AD + BC
uc = 2*AC
va = 2*CD
vb = AD - BC
vc = 2*AB
u1 = (-ub + sqrt(ub**2 - ua*uc) )/ua
#u2 = (-ub - sqrt(ub**2 - ua*uc) )/ua
#v2 = (-vb + sqrt(vb**2 - va*vc) )/va
v1 = (-vb - sqrt(vb**2 - va*vc) )/va
return u1, v1
class Homeomorphism(UV):
"""Homeomorphism"""
def __init__(self, tgtpts):
self.tgtpts = tgtpts
class Barycentric(Projection):
"""Transforms between plane and barycentric coordinates"""
nctrlpts = 3
def __init__(self, tgtpts):
self.tgtpts = tgtpts
m = np.concatenate([self.tgtpts, np.ones((1, 3))])
self.minv = np.linalg.inv(m)
def transform_v(self, bary):
"""Convert barycentric to plane"""
rbary = bary.reshape(3,-1)
result = self.tgtpts @ rbary
shape = [2,] + list(bary.shape[1:])
return result.reshape(shape)
def invtransform_v(self, xy):
"""Convert plane to barycentric"""
rxy = xy.reshape(2,-1)
shape = list(rxy.shape)
shape[0] = 1
xy1 = np.concatenate([rxy, np.ones(shape)])
result = self.minv @ xy1
shape = [3,] + list(xy.shape[1:])
return result.reshape(shape)
@staticmethod
def grid(spacing1=0.1, spacing2=1E-2, rang = [0, 1], eps=1E-8):
"""Create a triangle grid in barycentric coordinates
"""
nx = int((rang[1] - rang[0])/spacing1 + 1)
ny = int((rang[1] - rang[0])/spacing2 + 1)
x = np.linspace(rang[0], rang[1], nx)
y = np.linspace(rang[0], rang[1], ny)
z = 1 - x[..., np.newaxis] - y
#valid = (rang[0] <= z) & (z <= rang[1])
#z[~valid] = np.nan
bary1 = np.stack([np.broadcast_to(x[..., np.newaxis], (nx, ny)),
np.broadcast_to(y, (nx, ny)),
z])
bary = np.concatenate([bary1, np.roll(bary1, -1, axis=0),
np.roll(bary1, -2, axis=0)], axis=1)
gratlist = [bary[:, i] for i in range(nx*3)]
gratl2 = []
for i in range(nx*3):
g = gratlist[i]
valid = np.all((rang[0]-eps <= g) & (g <= rang[1]+eps), axis=0)
if np.sum(valid) > 1:
g = g[..., valid]
gratl2.append(LineString(g.T))
grat = geopandas.GeoSeries(gratl2)
return grat
@staticmethod
def gridpolys(n=11, eps=0.01):
poi = np.meshgrid(np.linspace(0, 1, n), np.linspace(0, 1, n))
poi.append(1 - poi[0] - poi[1])
poi = np.array(poi)
poilist = []
for i,j in np.ndindex(n-1,n-1):
if poi[2, i, j] >= eps:
x = Polygon([poi[:, i, j],poi[:, i, j+1],poi[:, i+1, j]])
poilist.append(x)
if poi[2, i+1, j+1] >= -eps:
y = Polygon([poi[:, i+1, j+1],poi[:, i+1, j],poi[:, i, j+1]])
poilist.append(y)
poiframe = geopandas.geoseries.GeoSeries(poilist)
return poiframe
@staticmethod
def segment(bary):
return np.argmin(bary, axis=0)
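# Illustrative round trip through Barycentric (not part of the original
# module): plane -> barycentric -> plane for a triangle with vertices
# (0,0), (1,0) and (0,1).
def _example_barycentric():
    tgtpts = np.array([[0.0, 1.0, 0.0],
                       [0.0, 0.0, 1.0]])
    proj = Barycentric(tgtpts)
    bary = proj.invtransform_v(np.array([0.25, 0.25]))
    return proj.transform_v(bary)  # ~ [0.25, 0.25]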
class UnitVector(Projection):
"""Convert longitude and latitude to unit vector normals.
The methods of this class are static, and mostly organized in a class
for consistency."""
@staticmethod
def transform(x, y, **kwargs):
pts = np.stack([x,y])
vresult = UnitVector.transform_v(pts, **kwargs)
return vresult
@staticmethod
def invtransform(x, y, z, **kwargs):
pts = np.stack([x,y,z])
vresult = UnitVector.invtransform_v(pts, **kwargs)
return vresult
@staticmethod
def transform_v(ll, scale=np.pi/180):
"""Convert longitude and latitude to 3-vector
>>> ll = np.arange(6).reshape(2,3)*18
>>> UnitVector.transform_v(ll)
array([[5.87785252e-01, 2.93892626e-01, 4.95380036e-17],
[0.00000000e+00, 9.54915028e-02, 3.59914664e-17],
[8.09016994e-01, 9.51056516e-01, 1.00000000e+00]])
"""
lon, lat = ll*scale
x = np.cos(lat)*np.cos(lon)
y = np.cos(lat)*np.sin(lon)
z = np.sin(lat)
return np.stack([x, y, z], axis=0)
@staticmethod
def invtransform_v(pts, scale=180/np.pi):
"""Convert 3-vector to longitude and latitude.
Vector does not have to be normalized.
>>> UnitVector.invtransform_v(np.eye(3))
array([[ 0., 90., 0.],
[ 0., 0., 90.]])
"""
lat = scale*np.arctan2(pts[2], sqrt(pts[1]**2 + pts[0]**2))
lon = scale*np.arctan2(pts[1], pts[0])
return np.stack([lon, lat], axis=0)
_unitsphgeod = pyproj.Geod(a=1, b=1)
class CtrlPtsProjection(Projection, ABC):
"""Subclass for any map projection that uses (2 or more) control points."""
def __init__(self, ctrlpts, geod = _unitsphgeod):
"""Parameters:
ctrlpts: 2x3 or 2x4 Numpy array; row 0 is the longitude and row 1 the
latitude (in degrees) of each control point
geod= a pyproj.Geod object. For a unit sphere use
pyproj.Geod(a=1,b=1)
"""
n = ctrlpts.shape[1]
if self.nctrlpts != n:
raise ValueError(
'ctrlpts has wrong number of points for this projection')
self.geod = geod
#it's possible to get a geod where this would give the wrong answer,
#but I think it would have to be really weird
area, _ = geod.polygon_area_perimeter([0,120,-120],[0,0,0])
self.totalarea = 2*area
self.ctrlpts = ctrlpts
ctrlpts_v = UnitVector.transform_v(ctrlpts)
self.ctrlpts_v = ctrlpts_v
center_v = ctrlpts_v.sum(axis=1)
self.center_v = center_v / np.linalg.norm(center_v)
self.center = UnitVector.invtransform_v(center_v)
antipode = antipode_v(ctrlpts)
self.antipode = antipode
self.antipode_v = UnitVector.transform_v(antipode)
self.sa = 0
if self.nctrlpts > 2:
faz, baz, sides = self.geod.inv(ctrlpts[0], ctrlpts[1],
np.roll(ctrlpts[0], -1),
np.roll(ctrlpts[1], -1))
self.sides = sides
self.faz = faz
self.baz = baz
self.ctrl_angles = (faz - np.roll(baz, 1))%360
area, _ = geod.polygon_area_perimeter(*ctrlpts)
self.area = area
self.ca = central_angle(ctrlpts_v,
np.roll(ctrlpts_v, -1, axis=1))
for i in range(1, self.nctrlpts-1):
self.sa += triangle_solid_angle(ctrlpts_v[..., 0],
ctrlpts_v[..., i],
ctrlpts_v[..., i+1])
self.edgenormals = np.cross(ctrlpts_v,
np.roll(ctrlpts_v, -1, axis=1), axis=0)
else:
faz, baz, sides = self.geod.inv(ctrlpts[0,0], ctrlpts[1,0],
ctrlpts[0,1], ctrlpts[1,1])
self.sides = sides
self.faz = faz
self.baz = baz
self.area = 0
self.ca = central_angle(ctrlpts_v[..., 0], ctrlpts_v[..., 1])
self.edgenormals = np.cross(ctrlpts_v[..., 0], ctrlpts_v[..., 1])
self.cosca = np.cos(self.ca)
self.sinca = np.sin(self.ca)
if self.sa < 0:
warnings.warn('control polygon is in negative orientation, '
+ 'may cause unusual results')
if self.nctrlpts == 4:
ctrlpts_v = self.ctrlpts_v
v0 = ctrlpts_v[..., 0]
v1 = ctrlpts_v[..., 1]
v2 = ctrlpts_v[..., 2]
v3 = ctrlpts_v[..., 3]
poip1 = np.cross(np.cross(v0, v1), np.cross(v3, v2))
poip2 = np.cross(np.cross(v0, v3), np.cross(v1, v2))
poip = np.stack([[poip1, -poip1],
[poip2, -poip2]]).transpose(2,0,1)
poip = poip / np.linalg.norm(poip, axis=0)
self.poi_v = poip
self.poi = UnitVector.invtransform_v(poip)
self.crossx = np.cross(ctrlpts_v,
np.roll(ctrlpts_v, -2, axis=1),
axis=0)[..., :2]
def orienttgtpts(self, tgtpts, N = (0, 90)):
"""Orient target points so that line from 0 to the projection of N
points up. Will fail if map projection doesn't define tgtpts."""
pN = self.transform(*N)
if np.allclose(pN, [0,0]):
raise ValueError('projection of N too close to 0')
angle = np.arctan2(pN[0],pN[1])
rotm = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
result = rotm @ tgtpts
self.tgtpts = result
def lune(self, lon, lat):
"""
Determine which lune a point or series of points lies in.
Lune 0 is the lune with vertex at the centroid and edges passing through
control points 0 and 1. Lune 1 is the same using control pts 1 and 2,
and Lune 2 uses control pts 2 and 0.
"""
#inexact on ellipsoids but close enough
testpt = UnitVector.transform(lon, lat)
testpt_v = testpt.reshape(3,-1)
ctrlpts_v = self.ctrlpts_v
center_v = self.center_v
cx = np.cross(center_v, ctrlpts_v, axis=0)
sk = cx.T @ testpt_v
sg = sk >= 0
ind = sg & ~np.roll(sg, shift=-1, axis=0)
result = np.argmax(ind, axis=0)
return result.reshape(testpt.shape[1:])
class BarycentricMapProjection(CtrlPtsProjection):
nctrlpts = 3
tweak = False
bcenter = np.ones(3)/3
def fixbary(self, bary):
if self.tweak:
return self.fixbary_normalize(bary)
else:
return self.fixbary_subtract(bary)
@staticmethod
def fixbary_normalize(bary):
"""Converts array bary to an array with sum = 1 by dividing by
bary.sum(). Will return nan if bary.sum() == 0.
>>> fixbary_normalize(np.arange(3))
array([0. , 0.33333333, 0.66666667])
"""
bary = np.array(bary)
return bary / np.sum(bary, axis=0, keepdims=True)
@staticmethod
def fixbary_subtract(bary):
"""Converts array bary to an array with sum = 1 by subtracting
(bary.sum() - 1)/bary.shape[0].
>>> fixbary_subtract(np.arange(3))
array([-0.66666667, 0.33333333, 1.33333333])
"""
bary = np.array(bary)
s = (np.sum(bary, axis=0, keepdims=True) - 1)/bary.shape[0]
return bary - s
def _fix_corners(self, lon, lat, result):
ctrlpts = self.ctrlpts
index0 = (lon == ctrlpts[0,0]) & (lat == ctrlpts[1,0])
index1 = (lon == ctrlpts[0,1]) & (lat == ctrlpts[1,1])
index2 = (lon == ctrlpts[0,2]) & (lat == ctrlpts[1,2])
#print(lon, lat, ctrlpts, result)
#print(index0.shape, result.shape, np.array([1, 0, 0])[..., np.newaxis].shape)
result[..., index0] = np.array([1, 0, 0])[..., np.newaxis]
result[..., index1] = np.array([0, 1, 0])[..., np.newaxis]
result[..., index2] = np.array([0, 0, 1])[..., np.newaxis]
return result
def _fix_corners_inv(self, bary, result):
index0 = (bary[0] == 1)
index1 = (bary[1] == 1)
index2 = (bary[2] == 1)
if np.any(index0):
result[..., index0] = self.ctrlpts_v[..., 0, np.newaxis]
if np.any(index1):
result[..., index1] = self.ctrlpts_v[..., 1, np.newaxis]
if np.any(index2):
result[..., index2] = self.ctrlpts_v[..., 2, np.newaxis]
return result
class UVMapProjection(CtrlPtsProjection):
nctrlpts = 4
bcenter = np.ones(2)/2
def _fix_corners(self, lon, lat, result):
ctrlpts = self.ctrlpts
index0 = (lon == ctrlpts[0,0]) & (lat == ctrlpts[1,0])
index1 = (lon == ctrlpts[0,1]) & (lat == ctrlpts[1,1])
index2 = (lon == ctrlpts[0,2]) & (lat == ctrlpts[1,2])
index3 = (lon == ctrlpts[0,3]) & (lat == ctrlpts[1,3])
result[..., index0] = np.array([ 0, 0])[..., np.newaxis]
result[..., index1] = np.array([ 1, 0])[..., np.newaxis]
result[..., index2] = np.array([ 1, 1])[..., np.newaxis]
result[..., index3] = np.array([ 0, 1])[..., np.newaxis]
return result
def _fix_corners_inv(self, x, y, result):
index0 = (x == 0) & (y == 0)
index1 = (x == 1) & (y == 0)
index2 = (x == 1) & (y == 1)
index3 = (x == 0) & (y == 1)
if np.any(index0):
result[..., index0] = self.ctrlpts_v[..., 0, np.newaxis]
if np.any(index1):
result[..., index1] = self.ctrlpts_v[..., 1, np.newaxis]
if np.any(index2):
result[..., index2] = self.ctrlpts_v[..., 2, np.newaxis]
if np.any(index3):
result[..., index3] = self.ctrlpts_v[..., 3, np.newaxis]
return result
#%% not-polygonal projections
class ChambTrimetric(CtrlPtsProjection):
"""Chamberlin trimetric projection"""
#FIXME this implementation fails for control triangles with
#high aspect ratios
nctrlpts = 3
def __init__(self, ctrlpts, geod=_unitsphgeod):
super().__init__(ctrlpts, geod)
self.tgtpts = trigivenlengths(self.sides)
try:
self.orienttgtpts(self.tgtpts)
except ValueError:
pass
def transform(self, x, y, **kwargs):
if hasattr(x, '__iter__'):
raise TypeError()
tgtpts = self.tgtpts
f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],
x*np.ones(3), y*np.ones(3))
faz = self.faz
raz1 = (faz - f) % 360
radsq = np.array(rad).squeeze()**2
ctgt = tgtpts.T.copy().view(dtype=complex).squeeze()
a = np.roll(ctgt, -1) - ctgt
b = ctgt
l = abs(a)
lsq = l**2
rsq = radsq/lsq
ssq = np.roll(radsq, -1, axis=-1)/lsq
x0 = (rsq - ssq + 1)/2
y0 = sqrt(-rsq**2 + 2*rsq*(ssq + 1) - (ssq - 1)**2)/2
y0[np.isnan(y0)] = 0
y = np.where(raz1 > 180, -y0, y0)
z0 = x0 +1j*y
pts = (a * z0 + b)
result = np.mean(pts)
return result.real, result.imag
def invtransform(self, *args, **kwargs):
return NotImplemented
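# Illustrative construction of a Chamberlin trimetric projection (the control
# points below are arbitrary lon/lat values chosen for this sketch and are not
# part of the original module).
def _example_chamberlin():
    ctrlpts = np.array([[-30.0, 30.0, 0.0],    # longitudes (degrees)
                        [-20.0, -20.0, 40.0]])  # latitudes (degrees)
    proj = ChambTrimetric(ctrlpts)
    return proj.transform(0.0, 10.0)  # projected (x, y) of lon=0, lat=10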
class LstSqTrimetric(ChambTrimetric):
"""Least-squares variation of the Chamberlin trimetric projection"""
def transform(self, x, y, **kwargs):
init = super().transform(x, y)
tgtpts = self.tgtpts
f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],
x*np.ones(3), y*np.ones(3))
def objective(v):
x = v[0]
y = v[1]
a = tgtpts[0]
b = tgtpts[1]
xma = x-a
ymb = y-b
dist = np.sqrt(xma**2 + ymb**2)
result = np.sum((dist - rad)**2 )
f = 1 - rad/dist
f[rad <= 0] = 1
jac = 2*np.array([np.sum(xma*f), np.sum(ymb*f)])
return result, jac
res = minimize(objective, init, jac=True,
method = 'BFGS')
return res.x
class LinearTrimetric(CtrlPtsProjection):
"""The linear variation of the Chamberlin Trimetric projection."""
nctrlpts = 3
matrix1 = np.array([[0,-1],
[1,0]])
matrix2 = np.array([[0, -1, 1],
[1, 0, -1],
[-1, 1, 0]])
matrixinv1 = np.array([[-2,1,1],
[1,-2,1],
[1,1,-2]])*2/3
def __init__(self, ctrlpts, geod=_unitsphgeod):
"""Parameters:
ctrlpts: 2x3 Numpy array, latitude and longitude of each control point
geod= a pyproj.Geod object. For a unit sphere use
pyproj.Geod(a=1,b=1).
"""
super().__init__(ctrlpts, geod)
self.radius = ((geod.a**(3/2) + geod.b**(3/2))/2)**(2/3)
self.tgtpts = trigivenlengths(self.sides)
self.setmat()
# try:
# self.orienttgtpts(self.tgtpts)
# self.setmat()
# except ValueError:
# pass
vctrl = self.ctrlpts_v
self.invctrlvector = np.linalg.pinv(vctrl)
self.invperpmatrix = self.invctrlvector @ self.invctrlvector.T
cosrthmin = 1 / np.sqrt(self.invperpmatrix.sum())
self.hminall = np.arccos(cosrthmin)**2
def setmat(self, tgtpts=None):
"""Set matrices that use tgtpts"""
if tgtpts is None:
tgtpts = self.tgtpts
else:
self.tgtpts = tgtpts
tgtde = np.linalg.det(np.concatenate([tgtpts, np.ones((1,3))], axis=0))
self.m = self.matrix1 @ tgtpts @ self.matrix2 /(2*tgtde)
self.minv = self.matrixinv1 @ tgtpts.T
def transform_v(self, pts):
rpts = pts.reshape((2,-1)).T
rad = []
for x,y in rpts:
f, b, radi = self.geod.inv(x*np.ones(3), y*np.ones(3),
self.ctrlpts[0], self.ctrlpts[1])
rad.append(radi)
shape = list(pts.shape)
shape[0] = 3
rad = np.array(rad).T
radsq = np.array(rad)**2
result = self.m @ radsq
return result.reshape(pts.shape)
def invtransform_v(self, pts, n=20, stop=1E-8):
if not self.geod.sphere:
warnings.warn('inverse transform is approximate on ellipsoids')
rpts = pts.reshape((2,-1))
k = self.minv @ rpts/self.radius**2
hmin = -np.min(k, axis=0)
print('k: ', k)
#hmax = np.pi**2-np.max(k, axis=0)
hminall = self.hminall
h = np.where(hmin < hminall, hminall, hmin)
print('h: ', h)
for i in range(n):
rsq = (k + h)
#pos = rsq > 0
neg = rsq < 0
zer = rsq == 0
c = np.where(neg, np.cosh(np.sqrt(-rsq)), np.cos(np.sqrt(rsq)))
b = np.where(neg, np.sinh(np.sqrt(-rsq)),
np.sin(np.sqrt(rsq)))/np.sqrt(np.abs(rsq))
b[zer] = 1
f = np.einsum('i...,ij,j...', c, self.invperpmatrix, c) - 1
fprime = np.einsum('i...,ij,j...', c, self.invperpmatrix, b)
delta = f/fprime
h += delta
print('delta:', delta)
print('h: ', h)
if np.max(np.abs(delta)) < stop:
break
#h = np.clip(h, hmin, hmax)
rsq = np.clip(k + h, 0, np.pi**2)
c = np.cos(np.sqrt(rsq))
vector = self.invctrlvector.T @ c
print(c)
print(vector)
return UnitVector.invtransform_v(vector).reshape(pts.shape)
def nmforplot(self, pts, n=100):
rpts = pts.reshape((2,-1))
k = self.minv @ rpts/self.radius**2
hmin = -np.min(k, axis=0)
hmax = np.pi**2-np.max(k, axis=0)
h = np.linspace(hmin,hmax,100).T
rsq = (k[..., np.newaxis] + h)
c = np.cos(np.sqrt(rsq))
nm = np.einsum('i...,ij,j...', c, self.invperpmatrix, c)
return h, nm
class Alfredo(BarycentricMapProjection):
"""this doesn't really accomplish anything"""
def __init__(self, ctrlpts, tweak=False):
"""Parameters:
ctrlpts: 2x3 Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
ctrlpts_v = self.ctrlpts_v
self.cosADfactor = (np.cross(np.roll(ctrlpts_v, 1, axis=1),
np.roll(ctrlpts_v, -1, axis=1), axis=0) +
ctrlpts_v * np.linalg.det(ctrlpts_v))
self.tweak = tweak
def transform_v(self, ll):
rll = ll.reshape(2, -1)
ctrlpts_v = self.ctrlpts_v
cosADfactor = self.cosADfactor
vtestpt = UnitVector.transform_v(rll)
cosAPi = (vtestpt.T @ ctrlpts_v).T
cosADi = (vtestpt.T @ cosADfactor).T
pli = np.sqrt((1-cosAPi)/(1-cosADi))
b = 1 - pli
result = self.fixbary(b)
shape = (3,) + ll.shape[1:]
return result.reshape(shape)
def invtransform(self, *args, **kwargs):
return NotImplemented
#%%
class Areal(BarycentricMapProjection):
"""Spherical areal projection."""
def __init__(self, ctrlpts, geod=_unitsphgeod):
"""Parameters:
ctrlpts: 2x3 Numpy array, latitude and longitude of each control point
geod: a pyproj.Geod object. For a unit sphere use
pyproj.Geod(a=1,b=1).
"""
super().__init__(ctrlpts, geod)
a_i = np.sum(np.roll(self.ctrlpts_v, -1, axis=1) *
np.roll(self.ctrlpts_v, 1, axis=1), axis=0)
self.a_i = a_i
self.b_i = (np.roll(a_i, -1) + np.roll(a_i, 1))/(1+a_i)
self.tau_c = self.tau(self.area)
def tau(self, area):
"""Convert areas on the geod to tau values for inverse transform"""
return np.tan(area/self.totalarea*2*np.pi)
def transform(self, x, y):
try:
areas = []
for i in range(3):
smtri = self.ctrlpts.copy()
smtri[:, i] = np.array([x,y])
a, _ = self.geod.polygon_area_perimeter(smtri[0],
smtri[1])
areas.append(a)
areas = np.array(areas)
return areas/self.area
except ValueError:
raise TypeError()
def invtransform_v(self, bary):
rbary = bary.reshape(3,-1)
if not self.geod.sphere:
warnings.warn('inverse transform is approximate on ellipsoids')
b_i = self.b_i[:,np.newaxis]
tau = self.tau_c
tau_i = self.tau(self.area*rbary)
t_i = tau_i/tau
c_i = t_i / ((1+b_i) + (1-b_i) * t_i)
f_i = c_i / (1 - np.sum(c_i, axis=0))
vector = self.ctrlpts_v @ f_i
shape = [2] + list(bary.shape[1:])
result = UnitVector.invtransform_v(vector).reshape(shape)
return result
#%%
class BisectTri(BarycentricMapProjection):
"""Inverse is only approximate
"""
def __init__(self, ctrlpts):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
ctrlpts_v = self.ctrlpts_v
#v_0 = ctrlpts_v[..., 0]
#v_1 = ctrlpts_v[..., 1]
#v_2 = ctrlpts_v[..., 2]
midpoint_v = np.roll(ctrlpts_v, 1, axis=1) + np.roll(ctrlpts_v, -1, axis=1)
midpoint_v /= np.linalg.norm(midpoint_v, axis=0, keepdims=True)
self.midpoint_v = midpoint_v
self.midpoint = UnitVector.invtransform_v(self.midpoint_v)
aream = []
for i in range(3):
#index = np.roll(np.arange(3), -i)[:2]
#lona = list(ctrlpts[0, index]) + [self.midpoint[0,i],]
#lata = list(ctrlpts[1, index]) + [self.midpoint[1,i],]
#am, _ = self.geod.polygon_area_perimeter(lona, lata)
am = triangle_solid_angle(ctrlpts_v[:,i], ctrlpts_v[:,(i+1)%3],
midpoint_v[:,i])
#vc[:,0], mi, lproj)
aream.append(am)
self.aream = np.array(aream)
def transform(self, lon, lat):
lon + 0 #will TypeError if lon is not a number
vtestpt = UnitVector.transform(lon, lat)
areas = []
vctrlpts = self.ctrlpts_v
actrlpts = self.ctrlpts
geod = self.geod
area = self.area
for i in range(3):
vc = np.roll(vctrlpts, i, axis=1)
#ac = np.roll(actrlpts, i, axis=1)
mi = self.midpoint_v[:,-i]#?
lproj = -np.cross(np.cross(vc[..., 1], vc[..., 2]),
np.cross(vc[..., 0], vtestpt))
#lllproj = UnitVector.invtransform_v(lproj)
#loni = [ac[0,0], mi[0], lllproj[0]]
#lati = [ac[1,0], mi[1], lllproj[1]]
#a1, _ = geod.polygon_area_perimeter(loni, lati)
a1 = triangle_solid_angle(vc[:,0], mi, lproj)
areas.append(a1)
areas = np.array(areas) + self.aream
aa = areas/area
bx = []
for i in range(3):
x,y,z = np.roll(aa, i, axis=0)
b = (y**2 * x**2 + z**2 * x**2 - y**2 * z**2
- x * y**2 + z * y**2
- 2*y*x**2 - x*z**2 + y*z**2 + x**2
+ 3*y*x + z*x - 2*y*z
- 2*x - y + z + 1)
bx.append(b)
bx = np.array(bx)
betax = bx/bx.sum()
return self._fix_corners(lon, lat, betax)
def invtransform(self, b1, b2, b3):
b1 + 0 #will TypeError if b1 is not a number
beta = np.array([b1,b2,b3])
vctrlpts3 = self.ctrlpts_v
#xs = []
ptts = []
for i in range(3):
beta1, beta2, beta3 = np.roll(beta, -i, axis=0)
x = beta2/(1 - beta1)
#xs.append(x)
a = x * self.area
pt0 = vctrlpts3[:,i]
pt1 = vctrlpts3[:,i-2]
pt2 = vctrlpts3[:,i-1]
cosw = pt1 @ pt2
w = np.arccos(cosw)
sinw = np.sin(w)
p2 = ((np.cos(a/2)* pt2 @ np.cross(pt0, pt1)- np.sin(a/2)*pt2 @ (pt1 + pt0))
+ np.sin(a/2)*cosw*(1 + pt1 @ pt0))
p3 = sinw*np.sin(a/2)*(1 + pt0 @ pt1)
r = 2*p3*p2/(p2**2 - p3**2)
t = np.arctan(r)/w#really close to just x
#print(x, t)
#t = x
ptt = slerp(pt2, pt1, t)
ptts.append(ptt)
ptts = np.array(ptts).T
ns = np.cross(vctrlpts3, ptts, axis=0)
pts = np.cross(ns, np.roll(ns, -1, axis=1), axis=0)
v = pts.sum(axis=1)
v = self._fix_corners_inv(beta, v)
return UnitVector.invtransform_v(v)
class BisectTri2(BarycentricMapProjection):
"""Inverse is only approximate"""
def __init__(self, ctrlpts):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
ctrlpts_v = self.ctrlpts_v
#v_0 = ctrlpts_v[..., 0]
#v_1 = ctrlpts_v[..., 1]
#v_2 = ctrlpts_v[..., 2]
midpoint_v = np.roll(ctrlpts_v, 1, axis=1) + np.roll(ctrlpts_v, -1, axis=1)
midpoint_v /= np.linalg.norm(midpoint_v, axis=0, keepdims=True)
self.midpoint_v = midpoint_v
self.midpoint = UnitVector.invtransform_v(self.midpoint_v)
def transform(self, lon, lat):
lon + 0 #will TypeError if lon is not a number
vtestpt = UnitVector.transform(lon, lat)
aa = []
vctrlpts = self.ctrlpts_v
actrlpts = self.ctrlpts
for i in range(3):
vc = np.roll(vctrlpts, i, axis=1)
ac = np.roll(actrlpts, i, axis=1)
mi = self.midpoint[:,-i]
lproj = -np.cross(np.cross(vc[..., 1], vc[..., 2]),
np.cross(vc[..., 0], vtestpt))
lllproj = UnitVector.invtransform_v(lproj)
dist1x = central_angle(vc[..., 1], lproj)
f, b, dist1x = self.geod.inv(mi[0], mi[1],
lllproj[0],lllproj[1])
f0, b0, _ = self.geod.inv(mi[0], mi[1],
ac[0,2], ac[1,2])
deltaf = (f-f0) % 360
if (deltaf <= 90) | (deltaf > 270):
s = 1
else:
s = -1
t = s*dist1x/self.sides[i] + 1/2
#print(t)
aa.append(t)
bx = []
for i in range(3):
x,y,z = np.roll(aa, i, axis=0)
b = (y**2 * x**2 + z**2 * x**2 - y**2 * z**2
- x * y**2 + z * y**2
- 2*y*x**2 - x*z**2 + y*z**2 + x**2
+ 3*y*x + z*x - 2*y*z
- 2*x - y + z + 1)
bx.append(b)
bx = np.array(bx)
betax = bx/bx.sum()
return self._fix_corners(lon, lat, betax)
def invtransform(self, b1, b2, b3):
b1 + 0 #will TypeError if b1 is not a number
beta = np.array([b1,b2,b3])
vctrlpts3 = self.ctrlpts_v
#xs = []
ptts = []
for i in range(3):
beta1, beta2, beta3 = np.roll(beta, -i, axis=0)
x = beta2/(1 - beta1)
pt1 = vctrlpts3[:,i-2]
pt2 = vctrlpts3[:,i-1]
ptt = slerp(pt2, pt1, x)
ptts.append(ptt)
ptts = np.array(ptts).T
ns = np.cross(vctrlpts3, ptts, axis=0)
pts = np.cross(ns, np.roll(ns, -1, axis=1), axis=0)
v = pts.sum(axis=1)
v = self._fix_corners_inv(beta, v)
return UnitVector.invtransform_v(v)
class FullerEq(BarycentricMapProjection):
def transform_v(self, ll):
vtestpt_pre = UnitVector.transform(*ll)
vtestpt = vtestpt_pre.reshape(3,-1)
ctrlpts_v = self.ctrlpts_v
b = []
for i in range(3):
v0 = ctrlpts_v[..., i]
v1 = ctrlpts_v[..., (i+1)%3]
v2 = ctrlpts_v[..., (i-1)%3]
cosw01 = v0 @ v1
cosw02 = v0 @ v2
w01 = np.arccos(cosw01)
w02 = np.arccos(cosw02)
w = (w01 + w02) / 2
sinw = np.sin(w)
cosw = np.cos(w)
vt01 = np.tensordot(vtestpt, np.cross(v0, v1), axes=(0,0))
vt12 = np.tensordot(vtestpt, np.cross(v1, v2), axes=(0,0))
vt20 = np.tensordot(vtestpt, np.cross(v2, v0), axes=(0,0))
bi = np.arctan2(sinw*vt12, cosw*vt12 + vt01 + vt20)/w
#gx = vt12 + cosw*(vt01 + vt20)
#tx = np.arctan2(sinw*(vt20 + vt01),gx)/w
b.append(bi)
#b.append(1-tx)
b = np.array(b)
result = self.fixbary_subtract(b)
return result.reshape(vtestpt_pre.shape)
def invtransform(self, b1, b2, b3):
b1 + 0 #still not vectorized
bi = np.array([b1, b2, b3])
v0 = self.ctrlpts_v[..., 0]
v1 = self.ctrlpts_v[..., 1]
v2 = self.ctrlpts_v[..., 2]
w = self.ca.mean()
bi = np.array([b1, b2, b3])
cw = np.cos(w)
#sw = np.sin(w)
cbw = np.cos(bi*w)
sbw = np.sin(bi*w)
pcbw = np.prod(cbw)
psbw = np.prod(sbw)
scc = np.sum(sbw * np.roll(cbw, -1) * np.roll(cbw, 1))
css = np.sum(cbw*np.roll(sbw, -1)*np.roll(sbw, 1))
objw2 = np.array([2*pcbw - cw - 1,
2*scc,
3*pcbw + 3 - css,
2*psbw])
rts = np.roots(objw2)[-1]#FIXME solve this cubic explicitly
rts = rts.real
k = np.arctan(rts)/w
#f0 = np.where(bi[0] + k > 1, -1, 1)
f1 = np.where(bi[1] + k > 1, -1, 1)
f2 = np.where(bi[2] + k > 1, -1, 1)
#v01 = slerp(v1, v0, bi[0] + k)
#v02 = slerp(v2, v0, bi[0] + k)
#cx12 = np.cross(v01, v02)*f0
v12 = slerp(v2, v1, bi[1] + k)
v10 = slerp(v0, v1, bi[1] + k)
cx20 = np.cross(v12, v10)*f1
v20 = slerp(v0, v2, bi[2] + k)
v21 = slerp(v1, v2, bi[2] + k)
cx01 = np.cross(v20, v21)*f2
v0x = normalize(np.cross(cx20, cx01))
#v1x = normalize(np.cross(cx01, cx12))
#v2x = normalize(np.cross(cx12, cx20))
v0x = self._fix_corners_inv(bi, v0x)
#print(v0x)
return UnitVector.invtransform_v(v0x)
class Fuller(BarycentricMapProjection):
def __init__(self, ctrlpts, tweak=False):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
self.tweak = tweak
def transform(self, lon, lat):
lon + 0#will TypeError if lon is not a number
vtestpt = UnitVector.transform(lon, lat)
ctrlpts_v = self.ctrlpts_v
b = []
for i in range(3):
v0 = ctrlpts_v[..., i]
v1 = ctrlpts_v[..., (i+1)%3]
v2 = ctrlpts_v[..., (i+2)%3]
vt01 = vtestpt @ np.cross(v0, v1)
vt12 = vtestpt @ np.cross(v1, v2)
vt20 = vtestpt @ np.cross(v2, v0)
cosw01 = v0 @ v1
cosw02 = v0 @ v2
w01 = np.arccos(cosw01)
w02 = np.arccos(cosw02)
if np.isclose(w01, w02):
w = (w01 + w02) / 2
sinw = np.sin(w)
cosw = np.cos(w)
g = vt12 + cosw*(vt01 + vt20)
ti = self._b_eq(w, sinw, vt20, vt01, g)
else:
sinw01 = sqrt(1 - cosw01**2)
sinw02 = sqrt(1 - cosw02**2)
g = vt12 + cosw02*vt01 + cosw01*vt20
ti = self._b_neq(w01, sinw02, vt01, w02, sinw01, vt20, g)
b.append(1-ti)
return self.fixbary(b)
def _b_neq(self, w01, sinw02, vt01, w02, sinw01, vt20, g):
t0 = (w01*sinw02*vt01 + w02*sinw01*vt20)/(g*w01*w02)
if ~np.isfinite(t0):
t0 = 0
else:
lim = np.pi/np.array([w01,w02]).max()
t0 = np.clip(t0, -lim, lim)
if abs(t0) < 1E-3:
return t0
w = (w01 + w02) / 2
sinw = np.sin(w)
t1 = self._b_eq(w, sinw, vt20, vt01, g)
t0 = np.clip(t0, -abs(t1), abs(t1))
c1 = sqrt(g**2 + (sinw01*vt20 - sinw02*vt01)**2)
c2 = sqrt(g**2 + (sinw01*vt20 + sinw02*vt01)**2)
d1 = np.arctan2(sinw01*vt20 - sinw02*vt01, g)
d2 = np.arctan2(sinw01*vt20 + sinw02*vt01, g)
def objective(t):
if t < -lim or t > lim:
return t**2, 2*t
if t == 0:
t = np.finfo(float).eps
z = c1*np.cos((w01 - w02)*t - d1) - c2*np.cos((w01 + w02)*t - d2)
dz = (-c1*(w01 - w02)*np.sin((w01 - w02)*t - d1)
+ c2*(w01 + w02)*np.sin((w01 + w02)*t - d2))
return z/t, (t*dz - z)*t**-2
res = root_scalar(objective, fprime=True, method='newton', x0=t0)
return res.root
def _b_eq(self, w, sinw, vt20, vt01, gx):
#x = sinw*(vt20 + vt01)/gx
tx = np.arctan2(sinw*(vt20 + vt01),gx)/w
#this form would be more efficient:
#b = np.arctan2(sinw*vt12, cosw*vt12 + vt01 + vt20)/w
return tx
def invtransform(self, b1, b2, b3):
if self.tweak:
return self._invtransform_normalize(b1, b2, b3)
else:
return self._invtransform_subtract(b1, b2, b3)
def _invtransform_subtract(self, b1, b2, b3):
b1 + 0#will TypeError if not a number
bi = np.array([b1, b2, b3])
v0 = self.ctrlpts_v[..., 0]
v1 = self.ctrlpts_v[..., 1]
v2 = self.ctrlpts_v[..., 2]
def objective(k):
f0 = np.where(bi[0] + k > 1, -1, 1)
f1 = np.where(bi[1] + k > 1, -1, 1)
f2 = np.where(bi[2] + k > 1, -1, 1)
v01 = slerp(v1, v0, bi[0] + k)
v02 = slerp(v2, v0, bi[0] + k)
cx12 = np.cross(v01, v02)*f0
v12 = slerp(v2, v1, bi[1] + k)
v10 = slerp(v0, v1, bi[1] + k)
cx20 = np.cross(v12, v10)*f1
v20 = slerp(v0, v2, bi[2] + k)
v21 = slerp(v1, v2, bi[2] + k)
cx01 = np.cross(v20, v21)*f2
v0x = normalize(np.cross(cx20, cx01))
v1x = normalize(np.cross(cx01, cx12))
v2x = normalize(np.cross(cx12, cx20))
#this is slightly more robust than the triple product
return (np.linalg.norm(v0x-v1x)
+ np.linalg.norm(v1x-v2x)
+ np.linalg.norm(v2x-v0x))
# dv01 = dslerp(v1, v0, bi[0] + k)
# dv02 = dslerp(v2, v0, bi[0] + k)
# dcx12 = (np.cross(dv01, v02) + np.cross(v01, dv02))*f0
# dv12 = dslerp(v2, v1, bi[1] + k)
# dv10 = dslerp(v0, v1, bi[1] + k)
# dcx20 = (np.cross(dv12, v10) + np.cross(v12, dv10))*f1
# dv20 = dslerp(v0, v2, bi[2] + k)
# dv21 = dslerp(v1, v2, bi[2] + k)
# dcx01 = (np.cross(dv20, v21) + np.cross(v20, dv21))*f2
# derivative = dcx12 @ v0x + dcx20 @ v1x + dcx01 @ v2x
# return cx12 @ v0x, derivative
if b1 == 0 or b2 == 0 or b3 == 0:
k = 0
elif np.allclose(self.sides, np.roll(self.sides, 1)):
kx = self._k_eq(b1, b2, b3)
k = kx[2]#FIXME is 2 always the right one?
else:
#FIXME why is this so freakin slow
res = minimize_scalar(objective, bracket=[0,0.1])
k = res.x
#f0 = np.where(bi[0] + k > 1, -1, 1)
f1 = np.where(bi[1] + k > 1, -1, 1)
f2 = np.where(bi[2] + k > 1, -1, 1)
#v01 = slerp(v1, v0, bi[0] + k)
#v02 = slerp(v2, v0, bi[0] + k)
#cx12 = np.cross(v01, v02)*f0
v12 = slerp(v2, v1, bi[1] + k)
v10 = slerp(v0, v1, bi[1] + k)
cx20 = np.cross(v12, v10)*f1
v20 = slerp(v0, v2, bi[2] + k)
v21 = slerp(v1, v2, bi[2] + k)
cx01 = np.cross(v20, v21)*f2
v0x = normalize(np.cross(cx20, cx01))
#v1x = normalize(np.cross(cx01, cx12))
#v2x = normalize(np.cross(cx12, cx20))
v0x = self._fix_corners_inv(bi, v0x)
return UnitVector.invtransform_v(v0x)
def _k_eq(self, b1, b2, b3):
w = self.ca.mean()
bi = np.array([b1, b2, b3])
cw = np.cos(w)
#sw = np.sin(w)
cbw = np.cos(bi*w)
sbw = np.sin(bi*w)
pcbw = np.prod(cbw)
psbw = np.prod(sbw)
scc = np.sum(sbw * np.roll(cbw, -1) * np.roll(cbw, 1))
css = np.sum(cbw*np.roll(sbw, -1)*np.roll(sbw, 1))
objw2 = np.array([2*pcbw - cw - 1,
2*scc,
3*pcbw + 3 - css,
2*psbw])
rts = np.roots(objw2)
return np.arctan(rts)/w
def _invtransform_normalize(self, b1, b2, b3):
b1 + 0#will TypeError if not a number
bi = np.array([b1, b2, b3])
v0 = self.ctrlpts_v[..., 0]
v1 = self.ctrlpts_v[..., 1]
v2 = self.ctrlpts_v[..., 2]
def objective(k):
f0 = np.where(bi[0] * k > 1, -1, 1)
f1 = np.where(bi[1] * k > 1, -1, 1)
f2 = np.where(bi[2] * k > 1, -1, 1)
v01 = slerp(v1, v0, bi[0] * k)
v02 = slerp(v2, v0, bi[0] * k)
cx12 = normalize(np.cross(v01, v02))*f0
v12 = slerp(v2, v1, bi[1] * k)
v10 = slerp(v0, v1, bi[1] * k)
cx20 = normalize(np.cross(v12, v10))*f1
v20 = slerp(v0, v2, bi[2] * k)
v21 = slerp(v1, v2, bi[2] * k)
cx01 = normalize(np.cross(v20, v21))*f2
v0x = normalize(np.cross(cx20, cx01))
v1x = normalize(np.cross(cx01, cx12))
v2x = normalize(np.cross(cx12, cx20))
#i think this is slightly more robust than the triple product
return (np.linalg.norm(v0x-v1x)
+ np.linalg.norm(v1x-v2x)
+ np.linalg.norm(v2x-v0x))
res = minimize_scalar(objective, bracket=[1,1.1])
k = res.x
#f0 = np.where(bi[0] * k > 1, -1, 1)
f1 = np.where(bi[1] * k > 1, -1, 1)
f2 = np.where(bi[2] * k > 1, -1, 1)
v12 = slerp(v2, v1, bi[1] * k)
v10 = slerp(v0, v1, bi[1] * k)
cx20 = normalize(np.cross(v12, v10))*f1
v20 = slerp(v0, v2, bi[2] * k)
v21 = slerp(v1, v2, bi[2] * k)
cx01 = normalize(np.cross(v20, v21))*f2
v0x = normalize(np.cross(cx20, cx01))
v0x = self._fix_corners_inv(bi, v0x)
return UnitVector.invtransform_v(v0x)
class SnyderEA(BarycentricMapProjection):
def __init__(self, ctrlpts):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
ctrlpts_v = self.ctrlpts_v
v_0 = ctrlpts_v[..., 0]
v_1 = ctrlpts_v[..., 1]
v_2 = ctrlpts_v[..., 2]
self.v_01 = v_0 @ v_1
self.v_12 = v_1 @ v_2
self.v_20 = v_2 @ v_0
self.v_012 = np.linalg.det(ctrlpts_v)
self.c = self.v_12
self.c2 = self.c**2
self.s2 = 1 - self.c2
self.s = sqrt(self.s2)
self.w = np.arccos(self.c)
self.midpoint_v = v_1 + v_2
self.midpoint = UnitVector.invtransform_v(self.midpoint_v)
lona = list(ctrlpts[0,:2]) + [self.midpoint[0],]
lata = list(ctrlpts[1,:2]) + [self.midpoint[1],]
self.area01m, _ = self.geod.polygon_area_perimeter(lona, lata)
def transform(self, lon, lat):
lon + 0
actrlpts = self.ctrlpts
ctrlpts_v = self.ctrlpts_v
area = self.area
geod = self.geod
vtestpt = UnitVector.transform(lon, lat)
lproj = -np.cross(np.cross(ctrlpts_v[..., 1], ctrlpts_v[..., 2]),
np.cross(ctrlpts_v[..., 0], vtestpt))
norm = np.linalg.norm(lproj, axis=0, keepdims=True)
if norm != 0:
lproj = lproj / norm
lllproj = UnitVector.invtransform_v(lproj)
cosAP = ctrlpts_v[..., 0] @ vtestpt
cosAD = ctrlpts_v[..., 0] @ lproj
pl = sqrt((1-cosAP)/(1-cosAD))
b0 = 1 - pl
lona = [actrlpts[0,0], self.midpoint[0], lllproj[0]]
lata = [actrlpts[1,0], self.midpoint[1], lllproj[1]]
a1, _ = geod.polygon_area_perimeter(lona, lata)
a1 += self.area01m
b2 = a1/area * pl
b1 = 1 - b0 - b2
result = np.stack([b0,b1,b2])
bresult = self._fix_corners(lon, lat, result)
return np.where(np.isfinite(bresult), bresult, 0)
def invtransform(self, b1, b2, b3):
ctrlpts_v = self.ctrlpts_v
area = self.area
lp = np.array(1-b1)
#make this an array so it won't complain about zero division, impute later
a = b3/lp
v_01 = self.v_01
v_20 = self.v_20
v_012 = self.v_012
c = self.c
s = self.s
w = self.w
Ar = a * area
sA = np.sin(Ar)
cA = 1 - np.cos(Ar)
Fp = ((sA * v_012 + cA*(v_01*c - v_20))**2 - (s*cA*(1 + v_01))**2)
Gp = 2*cA*s*(1 + v_01)*(sA*v_012 + cA*(v_01*c - v_20))
result = 1/w*np.arctan2(Gp, Fp)
vd = slerp(ctrlpts_v[..., 1], ctrlpts_v[..., 2], result)
AdotD = ctrlpts_v[..., 0] @ vd
AdotP = 1 - lp**2*(1-AdotD)
t = np.arccos(AdotP)/np.arccos(AdotD)
vresult = slerp(ctrlpts_v[..., 0], vd, t)
bary = np.stack([b1, b2, b3])
vresult = self._fix_corners_inv(bary, vresult)
vresult[~np.isfinite(vresult)] = 0
return UnitVector.invtransform_v(vresult)
class SnyderEA3(BarycentricMapProjection):
tmat = np.array([[1/3,0,0],
[1/3,1,0],
[1/3,0,1]])
tmatinv = np.array([[3,0,0],
[-1,1,0],
[-1,0,1]])
def __init__(self, ctrlpts):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
subproj = []
#want the center that divides the triangle into 3 equal-area triangles
ap = Areal(ctrlpts)
center = ap.invtransform(1/3, 1/3, 1/3)
self.center = center
self.center_v = UnitVector.transform(*center)
arr = np.arange(3)
for i in range(3):
index = np.roll(arr, -i)[1:]
cp = np.concatenate([center[:,np.newaxis],
ctrlpts[:, index]], axis=1)
pj = SnyderEA(cp)
subproj.append(pj)
self.subproj = subproj
def transform(self, lon, lat):
subproj = self.subproj
i = self.lune(lon, lat)
pj = subproj[i-1]#shift because we want the opposite vertex
betap = pj.transform(lon, lat)
betax = self.tmat @ betap
beta = np.roll(betax, i-1, axis=0)
return beta
def invtransform(self, b1, b2, b3):
bary = np.array([b1,b2,b3])
i = (Barycentric.segment(bary) ) % 3
betax = np.roll(bary, -i, axis=0)
betap = self.tmatinv @ betax
pj = self.subproj[i]#FIXME ?
return pj.invtransform(*betap)
class SnyderEASym(BarycentricMapProjection):
def __init__(self, ctrlpts):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
subproj = []
for i in range(3):
cp = np.roll(ctrlpts, i, axis=1)
pj = SnyderEA(cp)
subproj.append(pj)
self.subproj = subproj
def transform(self, lon, lat):
subproj = self.subproj
for i in range(3):
pj = subproj[i]
b = np.roll(pj.transform(lon, lat), -i, axis=0)
try:
beta += b
except NameError:
beta = b
return beta/3
def invtransform(self, *args, **kwargs):
return NotImplemented
def schwarz_fp(alpha, beta, gam):
"""Parameters of the Schwarz triangle map.
Args:
alpha, beta, gam: Angle of the triangle at each vertex, divided by pi.
Returns:
s1: Value of the Schwarz triangle map at z=1.
sinf: Value of the Schwarz triangle map at z=infinity.
scale: Scale factor for spherical triangles. Will be zero or undefined
if alpha + beta + gamma <= 1.
"""
a = (1 - alpha - beta - gam)/2
b = (1 - alpha + beta - gam)/2
c = 1 - alpha
palpha = np.pi*alpha
pbeta = np.pi*beta
pgam = np.pi*gam
gfact = gamma(2-c)/(gamma(1-a)*gamma(c))
s1 = gamma(c-a)*gamma(c-b)/gamma(1-b)*gfact
sinf = np.exp(1j*palpha)*gamma(b)*gamma(c-a)*gfact/gamma(b-c+1)
scale = sqrt(abs((np.cos(palpha+pbeta)+np.cos(pgam))/
(np.cos(palpha-pbeta)+np.cos(pgam))))
return s1, sinf, scale
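# Illustrative call (not part of the original module): for a spherical triangle
# with three right angles, each argument is 0.5 (the angle divided by pi),
# which matches how ConformalTri below calls this function via ctrl_angles/180.
def _example_schwarz_fp():
    return schwarz_fp(0.5, 0.5, 0.5)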
def c2c_mobius_finite(z,zi,wi):
"""Mobius transformation defined by mapping the points in zi to the points
in wi."""
ones = np.ones(zi.shape)
a = np.linalg.det(np.stack([zi*wi,wi,ones]))
b = np.linalg.det(np.stack([zi*wi,zi,wi]))
c = np.linalg.det(np.stack([zi,wi,ones]))
d = np.linalg.det(np.stack([zi*wi,zi,ones]))
return (a*z+b)/(c*z+d)
def c2c_mobius_01inf(z, z0=0, z1=1, zinf=1j ):
"""Mobius transformation defined by mapping 3 points to 0, 1, infinity"""
if ~np.isfinite(zinf):
return (z-z0)/(z1-z0)
elif ~np.isfinite(z1):
return (z-z0)/(z-zinf)
elif ~np.isfinite(z0):
return (z1-zinf)/(z-zinf)
else:
return (z-z0)*(z1-zinf)/((z-zinf)*(z1-z0))
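# Quick check for the finite case (illustrative only): the three reference
# points map to 0, 1 and infinity, so z=3 (the second point) maps to 1.
def _example_mobius():
    return c2c_mobius_01inf(3, z0=2, z1=3, zinf=4)  # == 1.0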
class ConformalTri(CtrlPtsProjection):
nctrlpts = 3
def __init__(self, ctrlpts, tgtpts, geod=_unitsphgeod):
super().__init__(ctrlpts, geod=geod)
self.tgtpts = float2d_to_complex(tgtpts.T).squeeze()
actrlpts = ctrlpts
basei = 0
basept = actrlpts[:, basei]
crsst = {'proj': 'stere',
'lon_0': basept[0],
'lat_0': basept[1]}
world_crs = {'init': 'epsg:4326'}
stert = pyproj.transformer.Transformer.from_crs(world_crs,
crs_to=crsst)
sterti = pyproj.transformer.Transformer.from_crs(crsst,
crs_to=world_crs)
self.stert = stert
self.sterti = sterti
self.ctrl_s1, self.ctrl_sinf, self.ctrl_scale = schwarz_fp(*self.ctrl_angles/180)
alpha, beta, gam = self.ctrl_angles/180
self.a = (1 - alpha - beta - gam)/2
self.b = (1 - alpha + beta - gam)/2
self.c = 1 - alpha
self.ap = (1 + alpha - beta - gam)/2#a - c + 1
self.bp = (1 + alpha + beta - gam)/2#b - c + 1
self.cp = 1 + alpha#2-c
tgt_sides = abs(np.roll(self.tgtpts, 1)
from scvelo.plotting.docs import doc_scatter, doc_params
from scvelo.plotting.utils import *
from inspect import signature
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
from anndata import AnnData
@doc_params(scatter=doc_scatter)
def scatter(
adata=None,
basis=None,
x=None,
y=None,
vkey=None,
color=None,
use_raw=None,
layer=None,
color_map=None,
colorbar=None,
palette=None,
size=None,
alpha=None,
linewidth=None,
linecolor=None,
perc=None,
groups=None,
sort_order=True,
components=None,
projection=None,
legend_loc=None,
legend_loc_lines=None,
legend_fontsize=None,
legend_fontweight=None,
legend_fontoutline=None,
xlabel=None,
ylabel=None,
title=None,
fontsize=None,
figsize=None,
xlim=None,
ylim=None,
add_density=None,
add_assignments=None,
add_linfit=None,
add_polyfit=None,
add_rug=None,
add_text=None,
add_text_pos=None,
add_quiver=None,
quiver_size=None,
add_outline=None,
outline_width=None,
outline_color=None,
n_convolve=None,
smooth=None,
rescale_color=None,
color_gradients=None,
dpi=None,
frameon=None,
zorder=None,
ncols=None,
nrows=None,
wspace=None,
hspace=None,
show=None,
save=None,
ax=None,
**kwargs,
):
"""\
Scatter plot along observations or variables axes.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
x: `str`, `np.ndarray` or `None` (default: `None`)
x coordinate
y: `str`, `np.ndarray` or `None` (default: `None`)
y coordinate
{scatter}
Returns
-------
If `show==False` a `matplotlib.Axis`
"""
if adata is None and (x is not None and y is not None):
adata = AnnData(np.stack([x, y]).T)
# restore old conventions
add_assignments = kwargs.pop("show_assignments", add_assignments)
add_linfit = kwargs.pop("show_linear_fit", add_linfit)
add_polyfit = kwargs.pop("show_polyfit", add_polyfit)
add_density = kwargs.pop("show_density", add_density)
add_rug = kwargs.pop("rug", add_rug)
basis = kwargs.pop("var_names", basis)
# keys for figures (fkeys) and multiple plots (mkeys)
fkeys = ["adata", "show", "save", "groups", "ncols", "nrows", "wspace", "hspace"]
fkeys += ["ax", "kwargs"]
mkeys = ["color", "layer", "basis", "components", "x", "y", "xlabel", "ylabel"]
mkeys += ["title", "color_map", "add_text"]
scatter_kwargs = {"show": False, "save": False}
for key in signature(scatter).parameters:
if key not in mkeys + fkeys:
scatter_kwargs[key] = eval(key)
mkwargs = {}
for key in mkeys: # mkwargs[key] = key for key in mkeys
mkwargs[key] = eval("{0}[0] if is_list({0}) else {0}".format(key))
# use c & color and cmap & color_map interchangeably,
# and plot each group separately if groups is 'all'
if "c" in kwargs:
color = kwargs.pop("c")
if "cmap" in kwargs:
color_map = kwargs.pop("cmap")
if "rasterized" not in kwargs:
kwargs["rasterized"] = settings._vector_friendly
if isinstance(color_map, (list, tuple)) and all(
[is_color_like(c) or c == "transparent" for c in color_map]
):
color_map = rgb_custom_colormap(colors=color_map)
if isinstance(groups, str) and groups == "all":
if color is None:
color = default_color(adata)
if is_categorical(adata, color):
vc = adata.obs[color].value_counts()
groups = [[c] for c in vc[vc > 0].index]
if isinstance(add_text, (list, tuple, np.ndarray, np.record)):
add_text = list(np.array(add_text, dtype=str))
# create list of each mkey and check if all bases are valid.
color, layer, components = to_list(color), to_list(layer), to_list(components)
x, y, basis = to_list(x), to_list(y), to_valid_bases_list(adata, basis)
# get multikey (with more than one element)
multikeys = eval(f"[{','.join(mkeys)}]")
if is_list_of_list(groups):
multikeys.append(groups)
key_lengths = np.array([len(key) if is_list(key) else 1 for key in multikeys])
multikey = (
multikeys[np.where(key_lengths > 1)[0][0]] if np.max(key_lengths) > 1 else None
)
# gridspec frame for plotting multiple keys (mkeys: list or tuple)
if multikey is not None:
if np.sum(key_lengths > 1) == 1 and is_list_of_str(multikey):
multikey = unique(multikey) # take unique set if no more than one multikey
if len(multikey) > 20:
raise ValueError("Please restrict the passed list to max 20 elements.")
if ax is not None:
logg.warn("Cannot specify `ax` when plotting multiple panels.")
if is_list(title):
title *= int(np.ceil(len(multikey) / len(title)))
if nrows is None:
ncols = len(multikey) if ncols is None else min(len(multikey), ncols)
nrows = int(np.ceil(len(multikey) / ncols))
else:
ncols = int(np.ceil(len(multikey) / nrows))
if not frameon:
lloc, llines = "legend_loc", "legend_loc_lines"
if lloc in scatter_kwargs and scatter_kwargs[lloc] is None:
scatter_kwargs[lloc] = "none"
if llines in scatter_kwargs and scatter_kwargs[llines] is None:
scatter_kwargs[llines] = "none"
grid_figsize, dpi = get_figure_params(figsize, dpi, ncols)
grid_figsize = (grid_figsize[0] * ncols, grid_figsize[1] * nrows)
fig = pl.figure(None, grid_figsize, dpi=dpi)
hspace = 0.3 if hspace is None else hspace
gspec = pl.GridSpec(nrows, ncols, fig, hspace=hspace, wspace=wspace)
ax = []
for i, gs in enumerate(gspec):
if i < len(multikey):
g = groups[i * (len(groups) > i)] if is_list_of_list(groups) else groups
multi_kwargs = {"groups": g}
for key in mkeys: # multi_kwargs[key] = key[i] if is multikey else key
multi_kwargs[key] = eval(
"{0}[i * (len({0}) > i)] if is_list({0}) else {0}".format(key)
)
ax.append(
scatter(
adata,
ax=pl.subplot(gs),
**multi_kwargs,
**scatter_kwargs,
**kwargs,
)
)
if not frameon and isinstance(ylabel, str):
set_label(xlabel, ylabel, fontsize, ax=ax[0], fontweight="bold")
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
else:
# make sure that there are no more lists, e.g. ['clusters'] becomes 'clusters'
color_map = to_val(color_map)
color, layer, basis = to_val(color), to_val(layer), to_val(basis)
x, y, components = to_val(x), to_val(y), to_val(components)
xlabel, ylabel, title = to_val(xlabel), to_val(ylabel), to_val(title)
# multiple plots within one ax for comma-separated y or layers (string).
if any([isinstance(key, str) and "," in key for key in [y, layer]]):
# comma split
y, layer, color = [
[k.strip() for k in key.split(",")]
if isinstance(key, str) and "," in key
else to_list(key)
for key in [y, layer, color]
]
multikey = y if len(y) > 1 else layer if len(layer) > 1 else None
if multikey is not None:
for i, mi in enumerate(multikey):
ax = scatter(
adata,
x=x,
y=y[i * (len(y) > i)],
color=color[i * (len(color) > i)],
layer=layer[i * (len(layer) > i)],
basis=basis,
components=components,
groups=groups,
xlabel=xlabel,
ylabel="expression" if ylabel is None else ylabel,
color_map=color_map,
title=y[i * (len(y) > i)] if title is None else title,
ax=ax,
**scatter_kwargs,
)
if legend_loc is None:
legend_loc = "best"
if legend_loc and legend_loc != "none":
multikey = [key.replace("Ms", "spliced") for key in multikey]
multikey = [key.replace("Mu", "unspliced") for key in multikey]
ax.legend(multikey, fontsize=legend_fontsize, loc=legend_loc)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
elif color_gradients is not None and color_gradients is not False:
vals, names, color, scatter_kwargs = gets_vals_from_color_gradients(
adata, color, **scatter_kwargs
)
cols = zip(adata.obs[color].cat.categories, adata.uns[f"{color}_colors"])
c_colors = {cat: col for (cat, col) in cols}
mkwargs.pop("color")
ax = scatter(
adata,
color="grey",
ax=ax,
**mkwargs,
**get_kwargs(scatter_kwargs, {"alpha": 0.05}),
) # background
ax = scatter(
adata,
color=color,
ax=ax,
**mkwargs,
**get_kwargs(scatter_kwargs, {"s": 0}),
) # set legend
sorted_idx = np.argsort(vals, 1)[:, ::-1][:, :2]
for id0 in range(len(names)):
for id1 in range(id0 + 1, len(names)):
cmap = rgb_custom_colormap(
[c_colors[names[id0]], "white", c_colors[names[id1]]],
alpha=[1, 0, 1],
)
mkwargs.update({"color_map": cmap})
c_vals = np.array(vals[:, id1] - vals[:, id0]).flatten()
c_bool = np.array([id0 in c and id1 in c for c in sorted_idx])
if np.sum(c_bool) > 1:
_adata = adata[c_bool] if np.sum(~c_bool) > 0 else adata
mkwargs["color"] = c_vals[c_bool]
ax = scatter(
_adata, ax=ax, **mkwargs, **scatter_kwargs, **kwargs
)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
# actual scatter plot
else:
# set color, color_map, edgecolor, basis, linewidth, frameon, use_raw
if color is None:
color = default_color(adata, add_outline)
if "cmap" not in kwargs:
kwargs["cmap"] = (
default_color_map(adata, color) if color_map is None else color_map
)
if "s" not in kwargs:
kwargs["s"] = default_size(adata) if size is None else size
if "edgecolor" not in kwargs:
kwargs["edgecolor"] = "none"
is_embedding = ((x is None) | (y is None)) and basis not in adata.var_names
if basis is None and is_embedding:
basis = default_basis(adata)
if linewidth is None:
linewidth = 1
if linecolor is None:
linecolor = "k"
if frameon is None:
frameon = True if not is_embedding else settings._frameon
if isinstance(groups, str):
groups = [groups]
if use_raw is None and basis not in adata.var_names:
use_raw = layer is None and adata.raw is not None
if projection == "3d":
from mpl_toolkits.mplot3d import Axes3D
ax, show = get_ax(ax, show, figsize, dpi, projection)
# phase portrait: get x and y from .layers (e.g. spliced vs. unspliced)
# NOTE(Haotian): true phase portrait plot here
if basis in adata.var_names:
if title is None:
title = basis
if x is None and y is None:
x = default_xkey(adata, use_raw=use_raw)
y = default_ykey(adata, use_raw=use_raw)
elif x is None or y is None:
raise ValueError("Both x and y have to specified.")
if isinstance(x, str) and isinstance(y, str):
layers_keys = list(adata.layers.keys()) + ["X"]
if any([key not in layers_keys for key in [x, y]]):
raise ValueError("Could not find x or y in layers.")
if xlabel is None:
xlabel = x
if ylabel is None:
ylabel = y
# NOTE(Haotian): the data to plot is retrieved here
x = get_obs_vector(adata, basis, layer=x, use_raw=use_raw)
y = get_obs_vector(adata, basis, layer=y, use_raw=use_raw)
if legend_loc is None:
legend_loc = "none"
if use_raw and perc is not None:
ub = np.percentile(x, 99.9 if not isinstance(perc, int) else perc)
ax.set_xlim(right=ub * 1.05)
ub = np.percentile(y, 99.9 if not isinstance(perc, int) else perc)
ax.set_ylim(top=ub * 1.05)
# velocity model fits (full dynamics and steady-state ratios)
if any(["gamma" in key or "alpha" in key for key in adata.var.keys()]):
plot_velocity_fits(
adata,
basis,
vkey,
use_raw,
linewidth,
linecolor,
legend_loc_lines,
legend_fontsize,
add_assignments,
ax=ax,
)
# embedding: set x and y to embedding coordinates
elif is_embedding:
X_emb = adata.obsm[f"X_{basis}"][:, get_components(components, basis)]
x, y = X_emb[:, 0], X_emb[:, 1]
# todo: 3d plotting
# z = X_emb[:, 2] if projection == "3d" and X_emb.shape[1] > 2 else None
elif isinstance(x, str) and isinstance(y, str):
var_names = (
adata.raw.var_names
if use_raw and adata.raw is not None
else adata.var_names
)
if layer is None:
layer = default_xkey(adata, use_raw=use_raw)
x_keys = list(adata.obs.keys()) + list(adata.layers.keys())
is_timeseries = y in var_names and x in x_keys
if xlabel is None:
xlabel = x
if ylabel is None:
ylabel = layer if is_timeseries else y
if title is None:
title = y if is_timeseries else color
if legend_loc is None:
legend_loc = "none"
# gene trend: x and y as gene along obs/layers (e.g. pseudotime)
if is_timeseries:
x = (
adata.obs[x]
if x in adata.obs.keys()
else adata.obs_vector(y, layer=x)
)
y = get_obs_vector(adata, basis=y, layer=layer, use_raw=use_raw)
# get x and y from var_names, var or obs
else:
if x in var_names and y in var_names:
if layer in adata.layers.keys():
x = adata.obs_vector(x, layer=layer)
y = adata.obs_vector(y, layer=layer)
else:
data = adata.raw if use_raw else adata
x, y = data.obs_vector(x), data.obs_vector(y)
elif x in adata.var.keys() and y in adata.var.keys():
x, y = adata.var[x], adata.var[y]
elif x in adata.obs.keys() and y in adata.obs.keys():
x, y = adata.obs[x], adata.obs[y]
elif np.any(
[var_key in x or var_key in y for var_key in adata.var.keys()]
):
var_keys = [
k
for k in adata.var.keys()
if not isinstance(adata.var[k][0], str)
]
var = adata.var[var_keys]
x = var.astype(np.float32).eval(x)
y = var.astype(np.float32).eval(y)
elif np.any(
[obs_key in x or obs_key in y for obs_key in adata.obs.keys()]
):
obs_keys = [
k
for k in adata.obs.keys()
if not isinstance(adata.obs[k][0], str)
]
obs = adata.obs[obs_keys]
x = obs.astype(np.float32).eval(x)
y = obs.astype(np.float32).eval(y)
else:
raise ValueError(
"x or y is invalid! pass valid observation or a gene name"
)
x, y = make_dense(x).flatten(), make_dense(y).flatten()
# convolve along x axes (e.g. pseudotime)
if n_convolve is not None:
vec_conv = np.ones(n_convolve)
import numpy as np
from sklearn.pipeline import Pipeline
from models.model import Model, ArrayLike
from preprocess.report_data import ReportData
from preprocess.report_data_d import ColName
from training.description_classification.utils import load_svm, SVMPipeline
class SVMDescriptionClf(Model[SVMPipeline]):
"""Complement Naive Bayes model for description classification."""
_model: Pipeline
def __init__(self):
self._model = load_svm()
def predict(self, X: ArrayLike) -> np.ndarray:
"""Predict the primary incident type of the given descriptions.
Params:
X: 1D array-like of descriptions to classify
Returns:
1D array of `IncidentType` predictions for the given descriptions.
"""
predictions = self._model.predict(X)
return np.array([prediction for prediction in predictions])
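# Illustrative usage (not part of the original module; it assumes the trained
# SVM pipeline pickle that load_svm() expects is present on disk):
def _example_predict():
    clf = SVMDescriptionClf()
    return clf.predict(["Employee slipped on a wet floor near the loading dock."])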
"""
.. autoclass:: Peeler
:members:
"""
import os
import json
from collections import OrderedDict, namedtuple
import time
import pdb
import numpy as np
import scipy.signal
from scipy.spatial.distance import minkowski, chebyshev
import dill as pickle
from .catalogueconstructor import CatalogueConstructor
from . import signalpreprocessor
from .peakdetector import detect_peaks_in_chunk
from .tools import make_color_dict
from tqdm import tqdm
from . import pythran_tools
if hasattr(pythran_tools, '__pythran__'):
HAVE_PYTHRAN = True
else:
HAVE_PYTHRAN = False
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
_dtype_spike = [('index', 'int64'), ('cluster_label', 'int64'), ('jitter', 'float64'), ('feature_distance', 'float64')]
Spike = namedtuple('Spike', ('index', 'cluster_label', 'jitter', 'feature_distance'))
from .labelcodes import (LABEL_TRASH, LABEL_UNCLASSIFIED, LABEL_ALIEN)
LABEL_LEFT_LIMIT = -11
LABEL_RIGHT_LIMIT = -12
LABEL_MAXIMUM_SHIFT = -13
# good label are >=0
#~ maximum_jitter_shift = 10
maximum_jitter_shift = 4
#~ maximum_jitter_shift = 1
class Peeler(OpenCL_Helper):
"""
The peeler is the core of spike sorting itself.
It basically does *template matching* on the signals.
This class needs a *catalogue* constructed by :class:`CatalogueConstructor`.
The computation is then applied chunk by chunk on the raw signal itself,
so this class is the same for both offline and online computing.
For each chunk, the algorithm is basically:
1. Apply the processing chain (filter, normalize, ...).
2. Detect peaks.
3. Try to classify each peak and estimate its *jitter*.
4. With the labeled peaks, create a prediction for the chunk.
5. Subtract the prediction from the processed signals.
6. Go back to **2** until there are no peaks left, or only peaks that can't be labeled.
7. Return labeled spikes from this (or the previous) chunk and the processed signals (for display or recording).
The main difficulty in the implementation is dealing with edges, because spike
waveforms can spread across two chunks.
Note that the global latency depends on these two parameters:
* lostfront_chunksize
* chunksize
"""
def __init__(self, dataio):
#for online dataio is None
self.dataio = dataio
def __repr__(self):
t = "Peeler <id: {}> \n workdir: {}\n".format(id(self), self.dataio.dirname)
return t
def change_params(
self, catalogue=None, chunksize=1024,
internal_dtype='float32',
use_sparse_template=False,
sparse_threshold_mad=1.5,
shape_distance_threshold=2,
shape_boundary_threshold=4,
energy_reduction_threshold=0,
confidence_threshold=0.6,
n_max_passes=3,
debugging=False,
use_opencl_with_sparse=False,
use_pythran_with_sparse=False,
cl_platform_index=None,
cl_device_index=None,
):
"""
Set parameters for the Peeler.
Parameters
----------
catalogue: the catalogue (a dict)
The catalogue made by CatalogueConstructor.
chunksize: int (1024 by default)
the size of chunk for processing.
internal_dtype: 'float32' or 'float64'
dtype of internal processing. float32 is OK. float64 is totally useless.
use_sparse_template: bool (default False)
For very high channel counts, centroids from the catalogue can be sparsified.
This speeds up the process a lot, but sparse_threshold_mad must be
set carefully and the results compared with use_sparse_template=False.
For low channel counts this is useless.
sparse_threshold_mad: float (1.5 by default)
The threshold level.
If all samples on one channel for one centroid are under this value,
that channel is considered as NaN.
use_opencl_with_sparse: bool
When use_sparse_template is True, you can use this to accelerate
the labelling of each spike. Useful for high channel counts.
use_pythran_with_sparse: bool
Experimental; same as use_opencl_with_sparse but with Pythran.
"""
assert catalogue is not None
self.catalogue = catalogue
self.chunksize = chunksize
self.internal_dtype = internal_dtype
self.use_sparse_template = use_sparse_template
self.sparse_threshold_mad = sparse_threshold_mad
self.use_opencl_with_sparse = use_opencl_with_sparse
self.use_pythran_with_sparse = use_pythran_with_sparse
# RD 03/20/2019
self.distance_order = 1
if shape_distance_threshold is None:
self.shape_distance_threshold = 5
else:
self.shape_distance_threshold = shape_distance_threshold
# RD 05/15/2019
if shape_boundary_threshold is None:
self.shape_boundary_threshold = 10
else:
self.shape_boundary_threshold = shape_boundary_threshold
if energy_reduction_threshold is None:
self.energy_reduction_threshold = 0
else:
self.energy_reduction_threshold = energy_reduction_threshold
# RD 07/25/2019
self.n_max_passes = n_max_passes
# RD 01/06/2021
ccFolderName = os.path.join(
self.dataio.dirname,
'channel_group_{}'.format(catalogue['chan_grp']),
'catalogue_constructor')
projectorPath = os.path.join(
ccFolderName, 'projector.pickle')
# TODO: supervised projector
supervisedProjectorPath = os.path.join(
ccFolderName, 'supervised_projector.pickle')
if os.path.exists(supervisedProjectorPath):
with open(supervisedProjectorPath, 'rb') as f:
self.projector = pickle.load(f)['projector']
elif os.path.exists(projectorPath):
with open(projectorPath, 'rb') as f:
self.projector = pickle.load(f)['projector']
if 'GlobalPUMAP' in self.projector.__repr__():
from umap.parametric_umap import ParametricUMAP, load_ParametricUMAP
import tensorflow as tf
tf.keras.backend.clear_session()
if os.path.exists(supervisedProjectorPath):
tfUmap = load_ParametricUMAP(
os.path.join(ccFolderName, 'supervised-umap'), useConfigAndWeights=True)
else:
tfUmap = load_ParametricUMAP(
os.path.join(ccFolderName, 'umap'), useConfigAndWeights=True)
self.projector.umap = tfUmap
classifierPath = os.path.join(
ccFolderName, 'classifier.pickle')
if os.path.exists(classifierPath):
with open(classifierPath, 'rb') as f:
self.classifier = pickle.load(f)['classifier']
if confidence_threshold is not None:
self.confidence_threshold = confidence_threshold
else:
self.confidence_threshold = 0
else:
self.classifier = None
self.confidence_threshold = 0
# evr = self.projector.explained_variance_ratio_
# cum_evr = np.cumsum(evr)
# self.variance_cutoff = 0.75
# self.feature_mask = cum_evr < self.variance_cutoff
# self.feature_mask[0] = True
# self.feature_window = None
# self.feature_window = evr[self.feature_mask] / np.sum(evr[self.feature_mask])
self.feature_mask = np.ones((self.projector.n_components), dtype=bool)
self.feature_window = np.ones((self.feature_mask.sum())) / self.feature_mask.sum()
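# as configured here, all projector components are kept and weighted uniformly;
# the commented-out block above sketches the alternative explained-variance cutoff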
#####
window1 = scipy.signal.windows.triang(2 * int(-self.catalogue['n_left']) + 1)
window2 = scipy.signal.windows.triang(2 * int(self.catalogue['n_right']) + 1)
window = np.concatenate(
(
window1[:int(-self.catalogue['n_left'])],
window2[int(self.catalogue['n_right']) + 1:]),
axis=-1)
discountEdges = False
if discountEdges:
# discount edges a lot
window[window < 0.5] = 0.1
# normalize to sum 1, so that the distance is an average
# deviation
self.distance_window = (window) / np.sum(window)
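# the two triangular half-windows concatenated above peak at the spike sample, so this
# window weights samples near the peak more heavily than the waveform edges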
# create a boundary around the mean prediction
# self.boundary_window = window
self.debugging = debugging
nClusters = catalogue['centers0'].shape[0]
self.catalogue.update(
{'template_distances': [[] for i in range(nClusters)]})
self.catalogue.update(
{'energy_reductions': [[] for i in range(nClusters)]})
self.catalogue.update(
{'feat_distances': [[] for i in range(nClusters)]})
self.catalogue.update(
{'resid_energies': [[] for i in range(nClusters)]})
self.catalogue.update(
{'classifier_confidences': [[] for i in range(nClusters)]})
self.catalogue.update(
{'tallyPlots': 0})
# end RD Mods
# Some check
if self.use_opencl_with_sparse or self.use_pythran_with_sparse:
assert self.use_sparse_template, 'For that option you must use sparse template'
if self.use_sparse_template:
assert self.use_opencl_with_sparse or self.use_pythran_with_sparse, 'For that option you must use OpenCL or Pythran'
if self.use_opencl_with_sparse:
assert HAVE_PYOPENCL, 'OpenCL is not available'
if self.use_pythran_with_sparse:
assert HAVE_PYTHRAN, 'Pythran is not available'
self.colors = make_color_dict(self.catalogue['clusters'])
# precompute some value for jitter estimation
n = self.catalogue['cluster_labels'].size
self.catalogue['wf1_norm2'] = np.zeros(n)
self.catalogue['wf2_norm2'] = np.zeros(n)
self.catalogue['wf1_dot_wf2'] = np.zeros(n)
for i, k in enumerate(self.catalogue['cluster_labels']):
chan = self.catalogue['max_on_channel'][i]
wf0 = self.catalogue['centers0'][i,: , chan]
wf1 = self.catalogue['centers1'][i,: , chan]
wf2 = self.catalogue['centers2'][i,: , chan]
self.catalogue['wf1_norm2'][i] = wf1.dot(wf1)
self.catalogue['wf2_norm2'][i] = wf2.dot(wf2)
self.catalogue['wf1_dot_wf2'][i] = wf1.dot(wf2)
if self.use_sparse_template:
centers = wf0 = self.catalogue['centers0']
#~ print(centers.shape)
mask = np.any(np.abs(centers)>sparse_threshold_mad, axis=1)
#~ print(mask.shape)
#~ print(mask)
print('average sparseness for templates', np.sum(mask)/mask.size)
self.catalogue['sparse_mask'] = mask
#~ for i in range(centers.shape[0]):
#~ fig, ax = plt.subplots()
#~ center = centers[i,:,:].copy()
#~ center_sparse = center.copy()
#~ center_sparse[:, ~mask[i, :]] = 0.
#~ ax.plot(center.T.flatten(), color='g')
#~ ax.plot(center_sparse.T.flatten(), color='r', ls='--')
#~ ax.axhline(sparse_threshold_mad)
#~ ax.axhline(-sparse_threshold_mad)
#~ plt.show()
if self.use_opencl_with_sparse:
OpenCL_Helper.initialize_opencl(self, cl_platform_index=cl_platform_index, cl_device_index=cl_device_index)
#~ self.ctx = pyopencl.create_some_context(interactive=False)
#~ self.queue = pyopencl.CommandQueue(self.ctx)
centers = self.catalogue['centers0']
nb_channel = centers.shape[2]
peak_width = centers.shape[1]
nb_cluster = centers.shape[0]
kernel = kernel_opencl%{'nb_channel': nb_channel,'peak_width':peak_width,
'total':peak_width*nb_channel,'nb_cluster' : nb_cluster}
prg = pyopencl.Program(self.ctx, kernel)
opencl_prg = prg.build(options='-cl-mad-enable')
self.kern_waveform_distance = getattr(opencl_prg, 'waveform_distance')
wf_shape = centers.shape[1:]
one_waveform = np.zeros(wf_shape, dtype='float32')
self.one_waveform_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=one_waveform)
self.catalogue_center_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=centers)
self.waveform_distance = np.zeros((nb_cluster), dtype='float32')
self.waveform_distance_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=self.waveform_distance)
#~ mask[:] = 0
self.mask_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=mask.astype('u1'))
rms_waveform_channel = np.zeros(nb_channel, dtype='float32')
self.rms_waveform_channel_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=rms_waveform_channel)
self.cl_global_size = (centers.shape[0], centers.shape[2])
#~ self.cl_local_size = None
self.cl_local_size = (centers.shape[0], 1) # faster on a GPU because of memory access
#~ self.cl_local_size = (1, centers.shape[2])
def process_one_chunk(self, pos, sigs_chunk):
#~ print('*'*5)
#~ print('chunksize', self.chunksize, '=', self.chunksize/self.sample_rate*1000, 'ms')
#~ t1 = time.perf_counter()
abs_head_index, preprocessed_chunk = self.signalpreprocessor.process_data(pos, sigs_chunk)
#~ t2 = time.perf_counter()
#~ print('process_data', (t2-t1)*1000)
# note: abs_head_index is smaller than pos because the preprocessed chunk
# lags behind, due to the local filtfilt in the signalpreprocessor
# shift the residuals buffer and put the new chunk on the right side
#~ t1 = time.perf_counter()
fifo_roll_size = self.fifo_residuals.shape[0]-preprocessed_chunk.shape[0]
if fifo_roll_size>0 and fifo_roll_size!=self.fifo_residuals.shape[0]:
self.fifo_residuals[:fifo_roll_size,:] = self.fifo_residuals[-fifo_roll_size:,:]
self.fifo_residuals[fifo_roll_size:,:] = preprocessed_chunk
#~ t2 = time.perf_counter()
#~ print('fifo move', (t2-t1)*1000.)
# relation between inside chunk index and abs index
shift = abs_head_index - self.fifo_residuals.shape[0]
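# i.e. abs_index = fifo_index + shift; this is applied to the spike indexes further down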
# TODO: remove peaks at the very beginning of the signal because of border filtering effects
#~ t1 = time.perf_counter()
good_spikes = []
all_ready_tested = []
passes_counter = 0
while True:
#detect peaks
# t3 = time.perf_counter()
local_peaks = detect_peaks_in_chunk(self.fifo_residuals, self.n_span, self.relative_threshold, self.peak_sign)
# t4 = time.perf_counter()
#~ print('self.fifo_residuals median', np.median(self.fifo_residuals, axis=0))
#~ print(' detect_peaks_in_chunk', (t4-t3)*1000.)
if len(all_ready_tested)>0:
local_peaks_to_check = local_peaks[~np.isin(local_peaks, all_ready_tested)]
else:
local_peaks_to_check = local_peaks
n_ok = 0
prints_counter = 0
for i, local_peak in enumerate(local_peaks_to_check):
#~ print(' local_peak', local_peak, 'i', i)
#~ t3 = time.perf_counter()
pctDone = np.floor(100 * i/local_peaks_to_check.shape[0])
if pctDone >= prints_counter:
print(' Peeler, pass {}: {} pct. done...'.format(passes_counter+1, pctDone))
prints_counter += 20
spike = self.classify_and_align_one_spike(local_peak, self.fifo_residuals, self.catalogue)
#~ t4 = time.perf_counter()
#~ print(' classify_and_align_one_spike', (t4-t3)*1000.)
if spike.cluster_label>=0:
#~ t3 = time.perf_counter()
#~ print(' >>spike.index', spike.index, spike.cluster_label, 'abs index', spike.index+shift)
spikes = np.array([spike], dtype=_dtype_spike)
prediction = make_prediction_signals(spikes, self.fifo_residuals.dtype, self.fifo_residuals.shape, self.catalogue, safe=False)
self.fifo_residuals -= prediction
spikes['index'] += shift
good_spikes.append(spikes)
if passes_counter < self.n_max_passes - 1:
n_ok += 1
#~ t4 = time.perf_counter()
#~ print(' make_prediction_signals and sub', (t4-t3)*1000.)
#~ print(' all_ready_tested before', all_ready_tested)
all_ready_tested = [ind for ind in all_ready_tested if np.abs(spike.index-ind)>self.peak_width]
#~ print(' all_ready_tested new deal', all_ready_tested)
else:
all_ready_tested.append(local_peak)
#
passes_counter += 1
#
if n_ok==0:
# no peak can be labeled
# reserve bad spikes on the right limit for next time
local_peaks = local_peaks[local_peaks<(self.chunksize+self.n_span)]
bad_spikes = np.zeros(local_peaks.shape[0], dtype=_dtype_spike)
bad_spikes['index'] = local_peaks + shift
bad_spikes['cluster_label'] = LABEL_UNCLASSIFIED
break
#~ t2 = time.perf_counter()
#~ print('LOOP classify_and_align_one_spike', (t2-t1)*1000)
# concatenate, sort and count
# the trick here is to keep the spikes near the right border
# and hold them until the next loop; this avoids unordered spikes
if len(good_spikes)>0:
good_spikes = np.concatenate(good_spikes)
near_border = (good_spikes['index'] - shift)>=(self.chunksize+self.n_span)
near_border_good_spikes = good_spikes[near_border].copy()
good_spikes = good_spikes[~near_border]
all_spikes = np.concatenate([good_spikes] + [bad_spikes] + self.near_border_good_spikes)
self.near_border_good_spikes = [near_border_good_spikes] # for next chunk
else:
all_spikes = np.concatenate([bad_spikes] + self.near_border_good_spikes)
self.near_border_good_spikes = []
# all_spikes = all_spikes[np.argsort(all_spikes['index'])]
all_spikes = all_spikes.take(np.argsort(all_spikes['index']))
self.total_spike += all_spikes.size
return abs_head_index, preprocessed_chunk, self.total_spike, all_spikes
def _initialize_before_each_segment(self, sample_rate=None, nb_channel=None, source_dtype=None):
self.nb_channel = nb_channel
self.sample_rate = sample_rate
self.source_dtype = source_dtype
self.signalpreprocessor_engine = self.catalogue['signal_preprocessor_params']['signalpreprocessor_engine']
#~ print('self.signalpreprocessor_engine', self.signalpreprocessor_engine)
SignalPreprocessor_class = signalpreprocessor.signalpreprocessor_engines[self.signalpreprocessor_engine]
#~ SignalPreprocessor_class = signalpreprocessor.signalpreprocessor_engines['numpy']
self.signalpreprocessor = SignalPreprocessor_class(sample_rate, nb_channel, self.chunksize, source_dtype)
p = dict(self.catalogue['signal_preprocessor_params'])
p.pop('signalpreprocessor_engine')
p['normalize'] = True
p['signals_medians'] = self.catalogue['signals_medians']
p['signals_mads'] = self.catalogue['signals_mads']
self.signalpreprocessor.change_params(**p)
assert self.chunksize>self.signalpreprocessor.lostfront_chunksize, 'lostfront_chunksize ({}) is greater than chunksize ({})!'.format(self.signalpreprocessor.lostfront_chunksize, self.chunksize)
self.internal_dtype = self.signalpreprocessor.output_dtype
self.peak_sign = self.catalogue['peak_detector_params']['peak_sign']
self.relative_threshold = self.catalogue['peak_detector_params']['relative_threshold']
peak_span = self.catalogue['peak_detector_params']['peak_span']
self.n_span = int(sample_rate*peak_span)//2
self.n_span = max(1, self.n_span)
self.peak_width = self.catalogue['peak_width']
self.n_side = self.catalogue['peak_width'] + maximum_jitter_shift + self.n_span + 1
assert self.chunksize > (self.n_side+1), 'chunksize is too small because of n_side'
self.alien_value_threshold = self.catalogue['clean_waveforms_params']['alien_value_threshold']
self.total_spike = 0
self.near_border_good_spikes = []
self.fifo_residuals = np.zeros((self.n_side+self.chunksize, nb_channel),
dtype=self.internal_dtype)
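# the FIFO keeps n_side extra samples on the left (peak_width + maximum_jitter_shift +
# n_span + 1, see above) so that waveform extraction and jitter shifts near the chunk
# border never run out of signal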
def initialize_online_loop(self, sample_rate=None, nb_channel=None, source_dtype=None):
self._initialize_before_each_segment(sample_rate=sample_rate, nb_channel=nb_channel, source_dtype=source_dtype)
def run_offline_loop_one_segment(self, seg_num=0, duration=None, progressbar=True):
chan_grp = self.catalogue['chan_grp']
kargs = {}
kargs['sample_rate'] = self.dataio.sample_rate
kargs['nb_channel'] = self.dataio.nb_channel(chan_grp)
kargs['source_dtype'] = self.dataio.source_dtype
self._initialize_before_each_segment(**kargs)
if duration is not None:
length = int(duration*self.dataio.sample_rate)
else:
length = self.dataio.get_segment_length(seg_num)
#~ length -= length%self.chunksize
#initialize engines
self.dataio.reset_processed_signals(seg_num=seg_num, chan_grp=chan_grp, dtype=self.internal_dtype)
self.dataio.reset_spikes(seg_num=seg_num, chan_grp=chan_grp, dtype=_dtype_spike)
iterator = self.dataio.iter_over_chunk(
seg_num=seg_num, chan_grp=chan_grp, chunksize=self.chunksize,
i_stop=length, signal_type='initial')
if progressbar:
iterator = tqdm(iterable=iterator, total=length//self.chunksize)
for pos, sigs_chunk in iterator:
if not progressbar:
pctDone = np.floor(100 * pos/length)
print('Peeler: on chunk {} of {} ({} pct.)'.format(pos//self.chunksize, length//self.chunksize, pctDone))
sig_index, preprocessed_chunk, total_spike, spikes = self.process_one_chunk(pos, sigs_chunk)
if sig_index<=0:
continue
# save preprocessed_chunk to file
self.dataio.set_signals_chunk(preprocessed_chunk, seg_num=seg_num, chan_grp=chan_grp,
i_start=sig_index-preprocessed_chunk.shape[0], i_stop=sig_index,
signal_type='processed')
if spikes is not None and spikes.size>0:
self.dataio.append_spikes(seg_num=seg_num, chan_grp=chan_grp, spikes=spikes)
if len(self.near_border_good_spikes)>0:
# deal with extra remaining spikes
extra_spikes = self.near_border_good_spikes[0]
extra_spikes = extra_spikes.take(np.argsort(extra_spikes['index']))
self.total_spike += extra_spikes.size
if extra_spikes.size>0:
self.dataio.append_spikes(seg_num=seg_num, chan_grp=chan_grp, spikes=extra_spikes)
self.dataio.flush_processed_signals(seg_num=seg_num, chan_grp=chan_grp)
self.dataio.flush_spikes(seg_num=seg_num, chan_grp=chan_grp)
if self.debugging:
sns.set_style('whitegrid')
fig, ax = plt.subplots(1, 5)
fig.set_size_inches(20, 4)
chanTitle = 'Chan_grp {}'.format(self.catalogue['chan_grp'])
# print(chanTitle)
for idx, distList in enumerate(self.catalogue['template_distances']):
try:
theseDist = np.array(distList)
this95 = (
np.nanmean(theseDist) +
2 * np.nanstd(theseDist))
summaryText = 'clus {}, 95% < {}, {} total'.format(idx, this95, len(theseDist))
sns.distplot(
theseDist, ax=ax[0],
label=summaryText, bins=np.arange(0, 5, 0.2))
ax[0].set_xlim([0, 5])
ax[0].set_xlabel('Weighted distance to template')
ax[0].set_ylabel('Count (normalized)')
##########
theseEn = np.array(self.catalogue['energy_reductions'][idx])
this95 = (
np.nanmean(theseEn) +
2 * np.nanstd(theseEn))
summaryText = 'clus {}, 95% < {}, {} total'.format(idx, this95, len(theseEn))
sns.distplot(
theseEn, ax=ax[1],
label=summaryText)
# ax[1].set_xlim([0, 100])
# print(summaryText)
ax[1].set_xlabel('energy reduction')
ax[1].set_ylabel('Count (normalized)')
######################
theseFeat = np.array(self.catalogue['feat_distances'][idx])
this95 = (
np.nanmean(theseFeat) +
2 * np.nanstd(theseFeat))
summaryText = 'clus {}, 95% < {}, {} total'.format(idx, this95, len(theseFeat))
sns.distplot(
theseFeat, ax=ax[2],
label=summaryText,
bins=np.arange(0, 5, 0.2)
)
ax[2].set_xlim([0, 5])
print(summaryText)
ax[2].set_xlabel('Feature distances from template')
ax[2].set_ylabel('Count (normalized)')
#
##############
theseWfEns = np.array(self.catalogue['resid_energies'][idx])
this95 = (
np.nanmean(theseWfEns) +
2 * np.nanstd(theseWfEns))
summaryText = 'clus {}, 95% < {}, {} total'.format(idx, this95, len(theseWfEns))
sns.distplot(
theseWfEns, ax=ax[3],
label=summaryText,
# bins=np.arange(0, 5, 0.2)
)
# ax[2].set_xlim([0, 5])
ax[3].set_xlabel('Squared sum of residual waveform')
ax[3].set_ylabel('Count (normalized)')
###########
theseConfs = np.array(self.catalogue['classifier_confidences'][idx])
this95 = (
np.nanmean(theseConfs) +
2 * np.nanstd(theseConfs))
summaryText = 'clus {}, 95% < {}, {} total'.format(idx, this95, len(theseConfs))
sns.distplot(
theseConfs, ax=ax[4],
label=summaryText,
# bins=np.arange(0, 5, 0.2)
)
# ax[2].set_xlim([0, 5])
ax[4].set_xlabel('Classifier confidence values')
ax[4].set_ylabel('Count (normalized)')
except Exception:
print('Error in peeler.run_offline_loop_one_segment() diagnostic plots')
plt.legend()
plt.title(chanTitle)
histPNGName = os.path.join(
self.dataio.dirname,
'templateHist_{}.png'.format(self.catalogue['chan_grp']))
plt.savefig(histPNGName)
plt.close()
def run_offline_all_segment(self, **kargs):
#TODO remove chan_grp here because it is redundant from catalogue['chan_grp']
assert hasattr(self, 'catalogue'), 'Call peeler.change_params() first'
#~ print('run_offline_all_segment', chan_grp)
for seg_num in range(self.dataio.nb_segment):
self.run_offline_loop_one_segment(seg_num=seg_num, **kargs)
run = run_offline_all_segment
def classify_and_align_one_spike(self, local_index, residual, catalogue):
# local_index is the index of the peak inside the residual buffer, not
# the absolute peak_pos, so time scaling must be done outside.
width = catalogue['peak_width']
n_left = catalogue['n_left']
#~ alien_value_threshold = catalogue['clean_waveforms_params']['alien_value_threshold']
# ind is the left border of the waveform extraction window
ind = local_index + n_left
if ind+width+maximum_jitter_shift+1>=residual.shape[0]:
# too near right limits no label
label = LABEL_RIGHT_LIMIT
jitter = 0
feature_distance = 0
elif ind<=maximum_jitter_shift:
# too near left limits no label
#~ print(' LABEL_LEFT_LIMIT', ind)
label = LABEL_LEFT_LIMIT
jitter = 0
feature_distance = 0
elif catalogue['centers0'].shape[0]==0:
# empty catalogue
label = LABEL_UNCLASSIFIED
jitter = 0
feature_distance = 0
else:
waveform = residual[ind:ind+width,:]
if self.alien_value_threshold is not None and \
np.any(np.abs(waveform)>self.alien_value_threshold) :
label = LABEL_ALIEN
jitter = 0
feature_distance = 0
else:
#~ t1 = time.perf_counter()
label, jitter, feature_distance = self.estimate_one_jitter(waveform)
#~ t2 = time.perf_counter()
#~ print(' estimate_one_jitter', (t2-t1)*1000.)
#~ jitter = -jitter
#TODO debug jitter sign is positive on right and negative to left
#~ print('label, jitter', label, jitter)
# if the estimated jitter is more than half a sample,
# try a peak shift and keep it if it yields a smaller jitter
#TODO debug peak shift
if np.abs(jitter) > 0.5 and label >=0:
prev_ind, prev_label, prev_jitter = ind, label, jitter
shift = -int(np.round(jitter))
#~ print('classify and align shift', shift)
if np.abs(shift) >maximum_jitter_shift:
#~ print(' LABEL_MAXIMUM_SHIFT avec shift')
label = LABEL_MAXIMUM_SHIFT
else:
ind = ind + shift
if ind+width>=residual.shape[0]:
#~ print(' LABEL_RIGHT_LIMIT avec shift')
label = LABEL_RIGHT_LIMIT
elif ind<0:
#~ print(' LABEL_LEFT_LIMIT avec shift')
label = LABEL_LEFT_LIMIT
#TODO: force a label anyway when the spike lies at the left edge of the FIFO
else:
waveform = residual[ind:ind+width,:]
new_label, new_jitter, new_feature_distance = self.estimate_one_jitter(waveform)
if np.abs(new_jitter)<np.abs(prev_jitter):
#~ print('keep shift')
label, jitter, feature_distance = new_label, new_jitter, new_feature_distance
local_index += shift
else:
#~ print('no keep shift worst jitter')
pass
# safety check: with the jitter applied, the index may fall outside the buffer
if label>=0:
local_pos = local_index - np.round(jitter).astype('int64') + n_left
if local_pos<0:
label = LABEL_LEFT_LIMIT
elif (local_pos+width) >=residual.shape[0]:
label = LABEL_RIGHT_LIMIT
return Spike(local_index, label, jitter, feature_distance)
def estimate_one_jitter(self, waveform):
"""
Estimate the jitter for one peak given its waveform.
Method proposed by Christophe Pouzat, see:
https://hal.archives-ouvertes.fr/hal-01111654v1
http://christophe-pouzat.github.io/LASCON2016/SpikeSortingTheElementaryWay.html
Notation used below:
* wf = the waveform of the peak
* k = cluster label of the peak
* wf0, wf1, wf2 : center of catalogue[k] plus its first and second derivatives
* jitter0 : jitter estimate at order 0
* jitter1 : jitter estimate at order 1
* h0_norm2 : error at order 0
* h1_norm2 : error at order 1
* h2_norm2 : error at order 2
"""
# This is the slowest part of the peeler!
# cluster_idx = np.argmin(np.sum(np.sum((catalogue['centers0']-waveform)**2, axis = 1), axis = 1))
catalogue = self.catalogue
if self.use_opencl_with_sparse:
rms_waveform_channel = np.sum(waveform**2, axis=0).astype('float32')
pyopencl.enqueue_copy(self.queue, self.one_waveform_cl, waveform)
pyopencl.enqueue_copy(self.queue, self.rms_waveform_channel_cl, rms_waveform_channel)
event = self.kern_waveform_distance(self.queue, self.cl_global_size, self.cl_local_size,
self.one_waveform_cl, self.catalogue_center_cl, self.mask_cl,
self.rms_waveform_channel_cl, self.waveform_distance_cl)
pyopencl.enqueue_copy(self.queue, self.waveform_distance, self.waveform_distance_cl)
cluster_idx = np.argmin(self.waveform_distance)
elif self.use_pythran_with_sparse:
s = pythran_tools.pythran_loop_sparse_dist(waveform,
catalogue['centers0'], catalogue['sparse_mask'])
cluster_idx = np.argmin(s)
import argparse
import os
import logging
import string
import sys
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from .transform import (
sequence_embedding,
normalize, denormalize,
one_hot_encode_classes,
split_train_test_set,
)
from .model import (
conv1d_regression_model,
conv1d_densenet_regression_model,
compile_regression_model,
MeanAbsoluteError,
)
from .load import load_dataset
from .utilities import SaveModelCallback, generate_random_run_id
logger = logging.getLogger(__name__)
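# High-level summary (inferred from the code below, stated as a hedged overview): this
# script trains a 1-D convolutional regression model ('conv1d' or 'conv1d_densenet') on
# sequences over the alphabet A/T/G/C loaded from data/dataset_train.csv, saving model
# weights and metadata under saved_models_regression/<run_id>/ and log output under
# summary_log/regression/<run_id>/.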
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
parser = argparse.ArgumentParser()
parser.add_argument('model_type', choices=['conv1d', 'conv1d_densenet'])
parser.add_argument('--run_id', type=str, default=None)
parser.add_argument('--resume', action='store_true')
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_epochs', type=int, default=10)
args = parser.parse_args()
model_type = args.model_type
run_id = args.run_id
resume = args.resume
learning_rate = args.learning_rate
batch_size = args.batch_size
n_epochs = args.n_epochs
if run_id is None and resume:
logger.error('Specify --run_id to resume run')
sys.exit(1)
elif run_id is None and not resume:
run_id = generate_random_run_id()
logger.info(f'Run {run_id}')
input_path = os.path.join(os.getcwd(), 'data/dataset_train.csv')
output_folder = os.path.join(os.getcwd(), f'saved_models_regression/{run_id}/')
model_path = os.path.join(output_folder, f'model.h5')
metadata_path = os.path.join(output_folder, f'metadata.json')
log_dir = os.path.join(os.getcwd(), f'summary_log/regression/{run_id}')
for dir_path in [output_folder, log_dir]:
try:
os.makedirs(dir_path)
except FileExistsError:
pass
alphabet = ['A', 'T', 'G', 'C']
if resume:
with open(metadata_path, 'r') as f:
metadata = json.load(f)
elif model_type == 'conv1d':
metadata = {
'run_id': run_id,
'alphabet': alphabet,
'model_type': model_type,
'n_epochs': 0,
'n_conv_1': 3,
'n_filters_1': 88,
'kernel_size_1': 29,
'n_conv_2': 1,
'n_filters_2': 54,
'kernel_size_2': 44,
'l2_reg': 1e-4,
'dropout': 0.5,
'seed': np.random.randint(0, 9999),