import numpy as np
# see function mfcc.m from Slaney's Auditory Toolbox
def ToolMfccFb (iFftLength, f_s):
# initialization
f_start = 133.3333
iNumLinFilters = 13
iNumLogFilters = 27
iNumFilters = iNumLinFilters + iNumLogFilters
linearSpacing = 66.66666666
logSpacing = 1.0711703
# compute band frequencies
f = np.zeros(iNumFilters+2)
f[np.arange(0,iNumLinFilters)] = f_start + np.arange(0,iNumLinFilters)*linearSpacing
f[np.arange(iNumLinFilters,iNumFilters+2)] = f[iNumLinFilters-1] * logSpacing**np.arange(1,iNumLogFilters+3)
# sanity check
if f[iNumLinFilters-1]>=f_s/2:
f = f[f<f_s/2]
iNumFilters = f.shape[0] - 2
f_l = f[np.arange(0,iNumFilters)]
f_c = f[np.arange(1,iNumFilters+1)]
f_u = f[np.arange(2,iNumFilters+2)]
# allocate memory for filters and set max amplitude
H = np.zeros([iNumFilters,iFftLength])
afFilterMax = 2 / (f_u - f_l)
f_k = np.arange(0,iFftLength)/(iFftLength-1)*f_s/2
# compute the transfer functions
for c in range(0,iNumFilters):
#lower filter slope
i_l = np.argmax(f_k>f_l[c])
i_u = np.max([0, np.argmin(f_k <= f_c[c])-1])
H[c,np.arange(i_l, i_u+1)] = afFilterMax[c] * (f_k[np.arange(i_l, i_u+1)]-f_l[c])/(f_c[c]-f_l[c])
#upper filter slope
i_l = i_u + 1
i_u = np.max([0, np.argmin(f_k < f_u[c])-1])
H[c, np.arange(i_l, i_u+1)] = afFilterMax[c] * (f_u[c]-f_k[np.arange(i_l, i_u+1)])/(f_u[c]-f_c[c])
return (H, f_c)
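# Illustrative usage sketch (added example, not part of the toolbox code above):
# apply the mel filterbank H to a toy magnitude spectrum and take a DCT of the
# log band energies, the usual final step towards MFCCs. The FFT length, sample
# rate, and random signal below are arbitrary assumptions.
def _example_mfcc_from_filterbank():
    from scipy.fftpack import dct
    iFftLength, f_s = 1024, 44100
    H, f_c = ToolMfccFb(iFftLength, f_s)                    # H: (40, 1024) transfer functions
    X = np.abs(np.fft.rfft(np.random.randn(2 * iFftLength - 2)))[:iFftLength]
    mel_energies = np.log10(H @ X + 1e-10)                  # log energy per mel band
    return dct(mel_energies, norm='ortho')[:13]             # first 13 cepstral coefficients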
"""Dataset loaders for MNIST, fashion MNIST, and Genz time series"""
import os
from math import floor
import numpy as np
import torch
@torch.no_grad()
def bin_data(input, num_bins=None):
"""
Discretize greyscale values into a finite number of bins
"""
if num_bins is None:
return input
assert num_bins > 0
# Set each of the corresponding bin indices
out_data = torch.full_like(input, -1)
for i in range(num_bins):
bin_inds = (i / num_bins <= input) * (input <= (i + 1) / num_bins)
out_data[bin_inds] = i
assert out_data.max() >= 0
return out_data.long()
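# Illustrative worked example (added): with num_bins=4, values fall into bins of
# width 0.25, e.g. bin_data(torch.tensor([0.0, 0.3, 0.6, 1.0]), num_bins=4)
# returns tensor([0, 1, 2, 3]).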
def load_genz(genz_num: int, slice_len=None):
"""
Load a dataset of time series with dynamics set by various Genz functions
Separate train, validation, and test datasets are returned, containing data
from 8000, 1000, and 1000 time series. The length of each time series
depends on `slice_len`, and is 100 by default (`slice_len=None`). For
positive integer values of `slice_len`, these time series are split into
contiguous chunks of length equal to `slice_len`.
Args:
genz_num: Integer between 1 and 6 setting choice of Genz function
slice_len: Optional length of the contiguous slices each series is cut into
Returns:
train, val, test: Three arrays with respective shapes (8000 * s_per_ts, slice_len),
(1000 * s_per_ts, slice_len), and (1000 * s_per_ts, slice_len), where s_per_ts
is the number of slices cut from each length-100 series (1 if slice_len is None).
"""
# Length between startpoints of output sliced series
stride = 2
assert 1 <= genz_num <= 6
assert slice_len is None or 1 < slice_len <= 100
if slice_len is None:
slice_suffix = ""
slice_len = 100
else:
assert isinstance(slice_len, int)
slice_suffix = f"_l{slice_len}_s{stride}"
# Number of slices per time series
s_per_ts = (100 - slice_len) // stride + 1
# Return saved dataset if we have already generated this previously
save_file = f"datasets/genz/genz{genz_num}{slice_suffix}.npz"
if os.path.isfile(save_file):
out = np.load(save_file)
train, val, test = out["train"], out["val"], out["test"]
assert val.shape == test.shape == (1000 * s_per_ts, slice_len)
assert train.shape == (8000 * s_per_ts, slice_len)
return train, val, test
# Definitions of each of the Genz functions which drive the time series
gfun = genz_funs[genz_num]
# Initialize random starting values and update using Genz update function
rng = np.random.default_rng(genz_num)
x = rng.permutation(np.linspace(0.0, 1.0, num=10000))
long_series = np.empty((10000, 100))
for i in range(100):
x = gfun(x)
long_series[:, i] = x
# Normalize the time series values to lie in range [0, 1]
min_val, max_val = long_series.min(), long_series.max()
long_series = (long_series - min_val) / (max_val - min_val)
# Split into train, validation, and test sets
base_series = (long_series[:8000], long_series[8000:9000], long_series[9000:])
# Cut up the full time series into shorter sliced time series
all_series = []
for split in base_series:
num_series = split.shape[0]
s_split = np.empty((num_series * s_per_ts, slice_len))
for i in range(s_per_ts):
j = i * stride
s_split[i * num_series : (i + 1) * num_series] = split[
:, j : (j + slice_len)
]
all_series.append(s_split)
# Shuffle individual time series, save everything to disk
train, val, test = [rng.permutation(ts) for ts in all_series]
np.savez_compressed(save_file, train=train, val=val, test=test)
return train, val, test
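# Worked example of the slicing arithmetic above (added note): with slice_len=20
# and stride=2, s_per_ts = (100 - 20) // 2 + 1 = 41, so the saved splits hold
# 8000 * 41, 1000 * 41, and 1000 * 41 slices of length 20 respectively.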
def bars_and_stripes(width=10, max_size=12000, seed=0):
"""
Generate images from bars and stripes dataset
Note that *all* images are generated before a subset are selected, so
choosing height/width too large will lead to a long runtime
Args:
width (int): Width (and height) of square B&S images
max_size (int): Maximum number of images in all returned splits
seed (int): Random seed for reproducibility
Returns:
train, val, test (ndarray): Flattened float32 bars and stripes data,
split roughly 10/12, 1/12, 1/12, each with shape (num_split, width**2)
"""
width = int(width)
num_total = 2 ** (width + 1) - 2
num_output = min(num_total, max_size)
# Create bit masks which will be used to define bar/stripe patterns
patterns = np.arange(2 ** width)
filters = np.arange(width)
bit_masks = (((patterns[:, np.newaxis] & (1 << filters))) > 0).astype(int)
# Generate all 2**(width + 1) - 2 images using above bit masks
bs_data = np.zeros((num_total, width, width))
bs_data[: num_total // 2] = bit_masks[:-1, :, np.newaxis] # Bars
bs_data[num_total // 2 :] = bit_masks[1:, np.newaxis, :] # Stripes
# Shuffle dataset and determine size to output
bs_data = np.random.RandomState(seed).permutation(bs_data)
# Split dataset into train, val, and test
bs_data = bs_data[:num_output].reshape((num_output, -1)).astype("float32")
lrg, sml = floor(num_output * 10 / 12), floor(num_output * 1 / 12)
train, val, test = bs_data[:lrg], bs_data[lrg:lrg+sml], bs_data[lrg+sml:lrg+2*sml]
return train, val, test
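# Illustrative trace of the bit-mask construction above (added example, width=2):
# patterns = [0, 1, 2, 3] gives bit_masks = [[0, 0], [1, 0], [0, 1], [1, 1]].
# Broadcasting bit_masks[:-1, :, np.newaxis] repeats each mask along one image
# axis and bit_masks[1:, np.newaxis, :] along the other, yielding the
# 2 ** (2 + 1) - 2 = 6 distinct images before shuffling and splitting.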
w = 0.5
c = 1.0 # I'm using the fact that c=1.0 to set c**2 = c**-2 = c
genz_funs = [
None, # Placeholder to give 1-based indexing
lambda x: np.cos(2 * np.pi * w + c * x),
lambda x: (c + (x + w)) ** -1,
lambda x: (1 + c * x) ** -2,
lambda x: np.exp(-c * np.pi * (x - w) ** 2),
# The final two entries are assumed to be the remaining members of the standard
# Genz test family (continuous and discontinuous variants), completing the six
# functions that the assertion on genz_num in load_genz expects.
lambda x: np.exp(-c * np.pi * np.abs(x - w)),
lambda x: np.where(x > w, 0, np.exp(c * x)),
]
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import math
import os
from typing import List, Optional, Tuple, Union
import numpy as np
from augly import utils
from PIL import Image
JPEG_EXTENSIONS = [".jpg", ".JPG", ".jpeg", ".JPEG"]
def validate_and_load_image(image: Union[str, Image.Image]) -> Image.Image:
"""
If image is a str, loads the image as a PIL Image and returns it. Otherwise,
we assert that image is a PIL Image and then return it.
"""
if isinstance(image, str):
local_path = utils.pathmgr.get_local_path(image)
utils.validate_image_path(local_path)
return Image.open(local_path)
assert isinstance(
image, Image.Image
), "Expected type PIL.Image.Image for variable 'image'"
return image
def ret_and_save_image(
image: Image.Image, output_path: Optional[str], src_mode: Optional[str] = None
) -> Image.Image:
if src_mode is not None:
image = image.convert(src_mode)
if output_path is not None:
if any(output_path.endswith(extension) for extension in JPEG_EXTENSIONS):
image = image.convert("RGB")
utils.validate_output_path(output_path)
image.save(output_path)
return image
def get_template_and_bbox(
template_filepath: str, template_bboxes_filepath: str
) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
template_key = os.path.basename(template_filepath)
local_template_path = utils.pathmgr.get_local_path(template_filepath)
template = Image.open(local_template_path)
local_bbox_path = utils.pathmgr.get_local_path(template_bboxes_filepath)
bbox = json.load(open(local_bbox_path, "rb"))[template_key]
return template, bbox
def rotated_rect_with_max_area(w: int, h: int, angle: float) -> Tuple[float, float]:
"""
Computes the width and height of the largest possible axis-aligned
rectangle (maximal area) within the rotated rectangle
source:
https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders # noqa: B950
"""
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
sin_a = abs(math.sin(math.radians(angle)))
cos_a = abs(math.cos(math.radians(angle)))
if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
cos_2a = cos_a * cos_a - sin_a * sin_a
wr = (w * cos_a - h * sin_a) / cos_2a
hr = (h * cos_a - w * sin_a) / cos_2a
return wr, hr
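# Illustrative check of the formula above (added example): for w=400, h=300 and
# angle=30, sin_a=0.5 and cos_a≈0.866, so 300 <= 2*0.5*0.866*400 ≈ 346.4 holds;
# then x = 150 and (wr, hr) = (150/0.5, 150/0.866) ≈ (300.0, 173.2).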
def pad_with_black(src: Image.Image, w: int, h: int) -> Image.Image:
"""
Returns the image src with the x dimension padded to width w if it was
smaller than w (and likewise for the y dimension with height h)
"""
curr_w, curr_h = src.size
dx = max(0, (w - curr_w) // 2)
dy = max(0, (h - curr_h) // 2)
padded = Image.new("RGB", (w, h))
padded.paste(src, (dx, dy, curr_w + dx, curr_h + dy))
return padded
def resize_and_pad_to_given_size(
src: Image.Image, w: int, h: int, crop: bool
) -> Image.Image:
"""
Returns the image src resized & padded with black if needed for the screenshot
transformation (i.e. if the spot for the image in the template is too small or
too big for the src image). If crop is True, will crop the src image if necessary
to fit into the template image; otherwise, will resize if necessary
"""
curr_w, curr_h = src.size
if crop:
dx = (curr_w - w) // 2
dy = (curr_h - h) // 2
src = src.crop((dx, dy, w + dx, h + dy))
curr_w, curr_h = src.size
elif curr_w > w or curr_h > h:
resize_factor = min(w / curr_w, h / curr_h)
new_w = int(curr_w * resize_factor)
new_h = int(curr_h * resize_factor)
src = src.resize((new_w, new_h), resample=Image.BILINEAR)
curr_w, curr_h = src.size
if curr_w < w or curr_h < h:
src = pad_with_black(src, w, h)
return src
def scale_template_image(
src_w: int,
src_h: int,
template_image: Image.Image,
bbox: Tuple[int, int, int, int],
max_image_size_pixels: Optional[int],
crop: bool,
) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
"""
Return template_image, and bbox resized to fit the src image. Takes in the
width & height of the src image plus the bounding box where the src image
will be inserted into template_image. If the template bounding box is
bigger than src image in both dimensions, template_image is scaled down
such that the dimension that was closest to src_image matches, without
changing the aspect ratio (and bbox is scaled proportionally). Similarly if
src image is bigger than the bbox in both dimensions, template_image and
the bbox are scaled up.
"""
template_w, template_h = template_image.size
left, upper, right, lower = bbox
bbox_w, bbox_h = right - left, lower - upper
# Scale up/down template_image & bbox
if crop:
resize_factor = min(src_w / bbox_w, src_h / bbox_h)
else:
resize_factor = max(src_w / bbox_w, src_h / bbox_h)
# If a max image size is provided & the resized template image would be too large,
# resize the template image to the max image size.
if max_image_size_pixels is not None:
template_size = template_w * template_h
if template_size * resize_factor ** 2 > max_image_size_pixels:
resize_factor = math.sqrt(max_image_size_pixels / template_size)
template_w = int(template_w * resize_factor)
template_h = int(template_h * resize_factor)
bbox_w, bbox_h = int(bbox_w * resize_factor), int(bbox_h * resize_factor)
left, upper = int(left * resize_factor), int(upper * resize_factor)
right, lower = left + bbox_w, upper + bbox_h
bbox = (left, upper, right, lower)
template_image = template_image.resize(
(template_w, template_h), resample=Image.BILINEAR
)
return template_image, bbox
def square_center_crop(src: Image.Image) -> Image.Image:
"""Returns a square crop of the center of the image"""
w, h = src.size
smallest_edge = min(w, h)
dx = (w - smallest_edge) // 2
dy = (h - smallest_edge) // 2
return src.crop((dx, dy, dx + smallest_edge, dy + smallest_edge))
def compute_transform_coeffs(
src_coords: List[Tuple[int, int]], dst_coords: List[Tuple[float, float]]
) -> np.ndarray:
"""
Given the starting & desired corner coordinates, computes the
coefficients required by the perspective transform.
"""
matrix = []
for sc, dc in zip(src_coords, dst_coords):
matrix.append([dc[0], dc[1], 1, 0, 0, 0, -sc[0] * dc[0], -sc[0] * dc[1]])
matrix.append([0, 0, 0, dc[0], dc[1], 1, -sc[1] * dc[0], -sc[1] * dc[1]])
A = np.matrix(matrix, dtype=float)
B = np.array(src_coords).reshape(8)
res = np.dot(np.linalg.inv(A.T * A) * A.T, B)
return np.array(res).reshape(8)
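# Illustrative usage sketch (added example): the eight coefficients returned above
# are in the layout PIL expects for a perspective warp. The image size and corner
# coordinates below are arbitrary assumptions.
def _example_perspective_warp():
    src = [(0, 0), (512, 0), (512, 512), (0, 512)]                    # original corners
    dst = [(20.0, 10.0), (500.0, 5.0), (510.0, 520.0), (0.0, 500.0)]  # warped corners
    coeffs = compute_transform_coeffs(src, dst)
    img = Image.new("RGB", (512, 512))
    return img.transform((512, 512), Image.PERSPECTIVE, coeffs, Image.BICUBIC)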
def compute_stripe_mask(
src_w: int, src_h: int, line_width: float, line_angle: float, line_density: float
) -> np.ndarray:
"""
Given stripe parameters such as stripe width, angle, and density, returns
a binary mask of the same size as the source image indicating the location
of stripes. This implementation is inspired by
https://stackoverflow.com/questions/34043381/how-to-create-diagonal-stripe-patterns-and-checkerboard-patterns
"""
line_angle *= math.pi / 180
line_distance = (1 - line_density) * min(src_w, src_h)
y_period = math.cos(line_angle) / line_distance
x_period = math.sin(line_angle) / line_distance
y_coord_range = np.arange(0, src_h)
'''This script contains all of the functions used in our study'''
from __future__ import print_function
import math
import numpy as np
import scipy as sp
from scipy import special as spsp
from scipy import sparse as sparse
from scipy import stats as stats
from scipy import optimize as opt
from scipy import interpolate as interpolate
import time
import mpmath
import random
def poisson_loglh(data,lmbd):
'''
Calculate log likelihood of Poisson parameter lambda given data.
Parameters
----------
data : list
sample dataset
lmbd : float
estimated Poisson parameter
Returns
-------
llh : float
log likelihood of lmbd given data
'''
llh=0
for x in data:
llh+=np.log(stats.poisson.pmf(x,lmbd))
return llh
def geo_loglh(data,lmbd):
'''
Calculate log likelihood of geometric parameter lambda given data.
Parameters
----------
data : list
sample dataset
lmbd : float
estimated geometric parameter
Returns
-------
llh : float
log likelihood of lmbd given data
'''
llh=0
for x in data:
llh+=np.log(stats.geom.pmf(x,1/(lmbd+1),-1))
return llh
def neg_bin_loglh_theta(data,lmbd,theta):
'''
Calculate log likelihood of negative binomial parameters given data.
Parameters
----------
data : list
sample dataset
lmbd : float
estimated mean of negative binomial distribution
theta : float
estimated overdispersion of negative binomial distribution
Returns
-------
llh : float
log likelihood of lmbd and theta given data
'''
llh=0
for x in data:
llh+=np.log(stats.nbinom.pmf(x,lmbd/theta,1/(theta+1)))
return llh
def get_theta_mle(data,theta_0):
'''
Calculate maximum likelihood estimate of negative binomial overdispersion
parameter theta given sample data.
Parameters
----------
data : list
sample dataset
theta_0 : float
initial estimate of overdispersion parameter
Returns
-------
: float
maximum likelihood estimate of overdispersion parameter
'''
def f(theta):
lmbd=np.mean(data)
return -neg_bin_loglh_theta(data,lmbd,theta)
mle=sp.optimize.minimize(f,[theta_0],bounds=((1e-6,50),))
return mle.x[0]
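# Illustrative usage sketch (added example; the simulated sample is an assumption,
# not study data): draw from a negative binomial with mean 3 and overdispersion
# theta=2 using the same parameterisation as neg_bin_loglh_theta, then recover theta.
def _example_theta_mle():
    lmbd, theta = 3.0, 2.0
    sample = stats.nbinom.rvs(lmbd / theta, 1 / (theta + 1), size=500, random_state=1)
    return get_theta_mle(list(sample), 1.0)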
def beta_poisson_pmf(x,lmbd,Phi,N):
'''
Evaluate the probability mass function for beta-Poisson distribution.
Parameters
----------
x : int or array
point(s) at which to evaluate function
lmbd : float
Phi : float
N : float
Returns
-------
P : float or array
probability of each point in x
'''
if type(x)==int:
P=spsp.hyp1f1(x+Phi*lmbd,x+Phi*N,-N)
for n in range(1,x+1): # This loop gives us the N^x/gamma(x+1) term
P=(N/n)*P
for m in range(x): # This loop gives us the term with the two gamma functions in numerator and denominator
P=((m+Phi*lmbd)/(m+Phi*N))*P
else:
P=[]
for i in range(0,len(x)):
p=spsp.hyp1f1(x[i]+Phi*lmbd,x[i]+Phi*N,-N)
for n in range(1,x[i]+1): # This loop gives us the N^x/gamma(x+1) term
p=(N/n)*p
for m in range(x[i]): # This loop gives us the term with the two gamma functions in numerator and denominator
p=((m+Phi*lmbd)/(m+Phi*N))*p
P=P+[p]
return P
hyp1f1_alt=np.frompyfunc(mpmath.hyp1f1,3,1)
def beta_poisson_loglh(data,lmbd,phi,N):
'''
Calculate log likelihood of beta-Poisson parameters given data.
Parameters
----------
data : list
sample dataset
lmbd : float
phi : float
N : float
Returns
-------
llh : float
log likelihood of parameters given data
'''
llh=0
for x in data:
llh+=x*np.log(N)-np.real(spsp.loggamma(x+1))+np.real(spsp.loggamma(phi*N))+np.real(spsp.loggamma(x+phi*lmbd))-np.real(spsp.loggamma(x+phi*N))-np.real(spsp.loggamma(phi*lmbd))
if x+phi*N<50:
llh+=np.log(spsp.hyp1f1(x+phi*lmbd,x+phi*N,-N))
else:
llh+=np.log(float(hyp1f1_alt(x+phi*lmbd,x+phi*N,-N)))
return llh
def neg_bin_loglh(data,lmbd,phi):
'''
Calculate log likelihood of negative binomial parameters given data, with
negative binomial parameterised with phi rather than theta.
Parameters
----------
data : list
sample dataset
lmbd : float
estimated mean of negative binomial distribution
phi : float
Returns
-------
llh : float
log likelihood of lmbd and theta given data
'''
llh=0
for x in data:
llh+=np.log(stats.nbinom.pmf(x,lmbd*phi,phi/(phi+1)))
return llh
def get_phi_and_N_mles(data,phi_0,N_0):
'''
Calculate maximum likelihood estimates of beta-Poisson parameters Phi and N.
Parameters
----------
data : list
sample dataset
phi_0 : float
initial estimate of Phi
N_0 : float
initial estimate of N
Returns
-------
: float
maximum likelihood estimate of Phi
: float
maximum likelihood estimate of N
'''
def f(params):
lmbd=np.mean(data)
phi=params[0]
if params[1]>0.1e-3:
N=1/params[1]
return -beta_poisson_loglh(data,lmbd,phi,N)
else:
return -neg_bin_loglh(data,lmbd,phi)
mle=sp.optimize.minimize(f,[phi_0,N_0],bounds=((1e-6,50),(0,1/np.mean(data))))
if mle.x[1]<0:
mle.x[1]=0
return mle.x[0],mle.x[1]
def zip_pmf(x,lmbd,sigma):
'''
Evaluate the probability mass function for zero-inflated Poisson
distribution.
Parameters
----------
x : int or array
point(s) at which to evaluate function
lmbd : float
mean of Poisson component
sigma : float
degree of zero inflation
Returns
-------
P : float or array
probability of each point in x
'''
if type(x)==int:
return sigma*(x==0)+(1-sigma)*stats.poisson.pmf(x,lmbd)
else:
return sigma*np.equal(x,np.zeros(len(x)))+(1-sigma)*stats.poisson.pmf(x,lmbd)
def zip_loglh(data,lmbd,sigma):
'''
Calculate log likelihood of zero-inflated Poisson parameters given data.
Parameters
----------
data : list
sample dataset
lmbd : float
mean of Poisson component
sigma : float
degree of zero inflation
Returns
-------
llh : float
log likelihood of lmbd and sigma given data
'''
llh=0
for x in data:
if x==0:
llh+=np.log(sigma+(1-sigma)*np.exp(-lmbd))
else:
llh+=np.log(1-sigma)+np.log(stats.poisson.pmf(x,lmbd))
return llh
def get_zip_mles(data,lmbd_0,sigma_0):
'''
Calculate maximum likelihood estimates of ZIP parameters lambda and sigma.
Parameters
----------
data : list
sample dataset
lmbd_0 : float
initial estimate of lambda
sigma_0 : float
initial estimate of sigma
Returns
-------
: float
maximum likelihood estimate of lambda
: float
maximum likelihood estimate of sigma
'''
def f(params):
lmbd=params[0]
sigma=params[1]
return -zip_loglh(data,lmbd,sigma)
mle=sp.optimize.minimize(f,[lmbd_0,sigma_0],bounds=((np.mean(data),50),(0,1-1e-6)))
return mle.x[0],mle.x[1]
def beta_poisson_pgf(s,lmbd,phi,N):
'''
Probability generating function of the beta-Poisson distribution.
Parameters
----------
s : float
point at which to evaluate PGF
lmbd : float
Phi : float
N : float
Returns
-------
G : float or array
PGF evaluated at s
'''
if np.size(np.asarray(s))==1:
G=spsp.hyp1f1(lmbd*phi,N*phi,N*(s-1));
else:
G=[]
for i in range(0,np.size(np.asarray(s))):
G=G+[spsp.hyp1f1(lmbd*phi,N*phi,N*(s[i]-1))]
return G
def poisson_pgf(s,lmbd):
'''
Probability generating function of the Poisson distribution.
Parameters
----------
s : float
point at which to evaluate PGF
lmbd : float
Returns
-------
G : float or array
PGF evaluated at s
'''
if np.size(np.asarray(s))==1:
G=np.exp(lmbd*(s-1))
else:
G=[]
for i in range(0,np.size(np.asarray(s))):
G=G+[np.exp(lmbd*(s[i]-1))]
return G
def geom_pgf(s,lmbd):
'''
Probability generating function of the geometric distribution.
Parameters
----------
s : float
point at which to evaluate PGF
lmbd : float
Returns
-------
G : float or array
PGF evaluated at s
'''
if np.size(np.asarray(s))==1:
G=1/(lmbd+1-lmbd*s)
else:
G=[]
for i in range(0,np.size(np.asarray(s))):
G=G+[1/(lmbd+1-lmbd*s[i])]
return G
def neg_bin_pgf(s,lmbd,theta):
'''
Probability generating function of the negative binomial distribution.
Parameters
----------
s : float
point at which to evaluate PGF
lmbd : float
theta : float
Returns
-------
G : float or array
PGF evaluated at s
'''
if np.size(np.asarray(s))==1:
G=(theta+1-s*theta)**(-lmbd/theta)
else:
G=[]
for i in range(0,np.size(np.asarray(s))):
G=G+[(theta+1-s[i]*theta)**(-lmbd/theta)]
return G
def zip_pgf(s,lmbd,sigma):
'''
Probability generating function of the zero-inflated Poisson distribution.
Parameters
----------
s : float
point at which to evaluate PGF
lmbd : float
sigma : float
Returns
-------
G : float or array
PGF evaluated at s
'''
if np.size(np.asarray(s))==1:
G=sigma+(1-sigma)*np.exp(lmbd*(s-1))
else:
G=[]
for i in range(0,np.size(np.asarray(s))):
G=G+[sigma+(1-sigma)*np.exp(lmbd*(s[i]-1))]
return G
def beta_poisson_extinction_prob( lmbd,phi,N ):
'''
Calculate the probability that the beta-Poisson branching process becomes
extinct.
Parameters
----------
lmbd : float
phi : float
N : float
Returns
-------
q : float
extinction probability
'''
if lmbd<=1:
return 1
else:
def f(s):
return beta_poisson_pgf(s,lmbd,phi,N)-s
q=opt.brentq(
f, 0, 1-1e-4);
return q
def poisson_extinction_prob( lmbd ):
'''
Calculate the probability that the Poisson branching process becomes
extinct.
Parameters
----------
lmbd : float
Returns
-------
q : float
extinction probability
'''
if lmbd<=1:
return 1
else:
def f(s):
return poisson_pgf(s,lmbd)-s
q=opt.brentq(
f, 0, 1-1e-6)
return q
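# Illustrative check (added example): for lmbd=2 the extinction probability is the
# smallest root of q = exp(2*(q - 1)), so poisson_extinction_prob(2.0) returns
# approximately 0.2032.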
def geom_extinction_prob( lmbd ):
'''
Calculate the probability that the geometric branching process becomes
extinct.
Parameters
----------
lmbd : float
Returns
-------
q : float
extinction probability
'''
if lmbd<=1:
return 1
else:
def f(s):
return geom_pgf(s,lmbd)-s
q=opt.brentq(
f, 0, 1-1e-6)
return q
def neg_bin_extinction_prob( lmbd,theta ):
'''
Calculate the probability that the negative binomial branching process
becomes extinct.
Parameters
----------
lmbd : float
theta : float
Returns
-------
q : float
extinction probability
'''
if lmbd<=1:
return 1
else:
def f(s):
return neg_bin_pgf(s,lmbd,theta)-s
q=opt.brentq(
f, 0, 1-1e-6)
return q
def zip_extinction_prob( lmbd,sigma ):
'''
Calculate the probability that the zero-inflated Poisson branching process
becomes extinct.
Parameters
----------
lmbd : float
sigma : float
Returns
-------
q : float
extinction probability
'''
if (1-sigma)*lmbd<=1:
return 1
else:
def f(s):
return zip_pgf(s,lmbd,sigma)-s
q=opt.brentq(
f, 0, 1-1e-6)
return q
def empirical_loglh(data):
'''
Calculate upper bound on log likelihood by using empirical distribution
based on observed frequencies in data.
Parameters
----------
data : list
sample data
Returns
-------
llh : float
log likelihood
'''
counts,bins=np.histogram(data,max(data)+1)
dist=counts/len(data)
llh=0
for x in data:
llh+=np.log(dist[x])
return llh
def get_lambda_and_phi_mles(data,lmbd_0,phi_0,N_emp):
'''
Calculate maximum likelihood estimates of beta-Poisson parameters lambda and
Phi given empirical estimate of contact parameter N.
Parameters
----------
data : list
sample dataset
lmbd_0 : float
initial estimate of lambda
phi_0 : float
initial estimate of Phi
N_emp : float
empirical estimate of N
Returns
-------
: float
maximum likelihood estimate of lambda
: float
maximum likelihood estimate of Phi
'''
def f(params):
lmbd=params[0]
phi=params[1]
return -beta_poisson_loglh(data,lmbd,phi,N_emp)
mle=sp.optimize.minimize(f,[lmbd_0,phi_0],bounds=((1e-6,10),(1e-6,50)))
return mle.x[0],mle.x[1]
def generate_mle_dict(data,
theta_0,
phi_0,
N_0,
lmbd_0,
sigma_0):
'''
Calculate maximum likelihood estimates of parameters for each offspring
distribution and output them as a dictionary.
Parameters
----------
data : list
sample data to fit to
theta_0 : float
initial estimate of negative binomial overdispersion parameter
phi_0 : float
initial estimate of beta-Poisson parameter Phi
N_0 : float
initial estimate of beta-Poisson parameter N
lmbd_0 : float
initial estimate of ZIP baseline lambda
sigma_0 : float
initial estimate of ZIP inflation parameter sigma
Returns
-------
mle_dict : dictionary
dictionary containing maximum likelihood estimates of parameters for
each model.
'''
theta_mle=get_theta_mle(data,theta_0)
phi_mle,N_inv_mle=get_phi_and_N_mles(data,phi_0,N_0)
lmbd_mle,sigma_mle=get_zip_mles(data,lmbd_0,sigma_0)
mle_dict = {
'poisson' : np.mean(data),
'geometric' : np.mean(data),
'negative binomial' : [np.mean(data), theta_mle],
'zip' : [lmbd_mle, sigma_mle],
'beta-Poisson' : [np.mean(data), phi_mle, N_inv_mle]
}
return mle_dict
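# Illustrative usage sketch (added example; the Poisson-simulated sample is an
# assumption used only to show the call pattern):
def _example_mle_dict():
    sample = list(stats.poisson.rvs(2.0, size=200, random_state=0))
    return generate_mle_dict(sample, theta_0=1.5, phi_0=0.5, N_0=0.3, lmbd_0=1.5, sigma_0=0.5)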
def poisson_mle_grid(data, interval, points):
'''
Calculate a confidence interval for the MLE of the Poisson distribution
given some data using a grid calculation.
Parameters
----------
data : list
sample dataset
interval : list
list containing the lower and upper bounds on which to perform the
grid calculation
points : int
number of points to use in the grid calculation
Returns
-------
lmbd : array
value of lambda over grid
llh : array
log likelihood of lambda values over grid
mle : float
maximum likelihood estimate of lambda
ci : list
95% confidence interval for lambda given data
'''
lmbd=np.linspace(interval[0],interval[1],points)
llh=np.zeros(points)
for x in data:
llh=llh+np.log(stats.poisson.pmf(x,lmbd))
mle_loc=np.argmax(llh)
mle=lmbd[mle_loc]
lh_normed=np.exp(llh)/np.sum(np.exp(llh))
current_max=lh_normed[mle_loc]
interval_weight=current_max
while interval_weight<0.95:
max_loc=np.argmax(lh_normed[np.where(lh_normed<current_max)])
current_max=lh_normed[np.where(lh_normed<current_max)][max_loc]
interval_weight+=current_max
ci=[np.min(lmbd[np.where(lh_normed>=current_max)[0]]),np.max(lmbd[np.where(lh_normed>=current_max)[0]])]
return lmbd,llh,mle,ci
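# Illustrative usage note (added): lmbd_grid, llh, mle, ci = poisson_mle_grid(sample,
# [0.5, 4.0], 500) evaluates the likelihood on a 500-point grid, returning an MLE
# close to np.mean(sample) together with an approximate 95% interval.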
def geometric_mle_grid(data,interval,points):
'''
Calculate a confidence interval for the MLE of the geometric distribution
given some data using a grid calculation.
Parameters
----------
data : list
sample dataset
interval : list
list containing the lower and upper bounds on which to perform the
grid calculation
points : int
number of points to use in the grid calculation
Returns
-------
lmbd : array
value of lambda over grid
llh : array
log likelihood of lambda values over grid
mle : float
maximum likelihood estimate of lambda
ci : list
95% confidence interval for lambda given data
'''
lmbd=np.linspace(interval[0],interval[1],points)
llh=np.zeros(points)
for x in data:
llh=llh+np.log(stats.geom.pmf(x,1/(lmbd+1),-1))
mle_loc=np.argmax(llh)
mle=lmbd[mle_loc]
lh_normed=np.exp(llh)/np.sum(np.exp(llh))
# Normalise the likelihood and extract the MLE and 95% interval exactly as in
# poisson_mle_grid above
current_max=lh_normed[mle_loc]
interval_weight=current_max
while interval_weight<0.95:
max_loc=np.argmax(lh_normed[np.where(lh_normed<current_max)])
current_max=lh_normed[np.where(lh_normed<current_max)][max_loc]
interval_weight+=current_max
ci=[np.min(lmbd[np.where(lh_normed>=current_max)[0]]),np.max(lmbd[np.where(lh_normed>=current_max)[0]])]
return lmbd,llh,mle,ci
# (c) Copyright [2018] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############################################################################################################
# __ __ ___ ____ ______ ____ __ ____ ___ ___ _ ____ __ __ ______ __ __ ___ ____ #
# | | | / _| \| | | / ]/ | | | | | | \| | | | | |/ \| \ #
# | | |/ [_| D | || | / /| o | | _ _ | | | o | | | | | | | _ | #
# | | | _| /|_| |_|| |/ / | | | \_/ | |___ | _/| ~ |_| |_| _ | O | | | #
# | : | [_| \ | | | / \_| _ | | | | | | | |___, | | | | | | | | | #
# \ /| | . \ | | | \ | | | | | | | | | | | | | | | | | | | #
# \_/ |_____|__|\_| |__| |____\____|__|__| |___|___|_____| |__| |____/ |__| |__|__|\___/|__|__| #
# #
############################################################################################################
# Vertica-ML-Python allows the user to create RVDs (Resilient Vertica Datasets). #
# An RVD simplifies data exploration, data cleaning and machine learning in Vertica. #
# It is an object which keeps track of all the actions the user wants to perform #
# and executes them when they are needed. #
#####################################################################################
# #
# Author: <NAME> #
# #
######################
# Libraries
import vertica_ml_python.rvd as rvd
import vertica_ml_python.rvc as rvc
from vertica_ml_python.fun import isnotebook
import sys,os
import shutil
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patches as mpatches
from matplotlib.ticker import MaxNLocator
import time
# Return model accuracy
def accuracy(model,threshold=0.5,input_class=1):
return 1-error_rate(model=model,threshold=threshold,input_class=input_class).data_columns[1][-1]
# Return model auc
def auc(model,input_class=1):
return roc(model=model,num_bins=30000,show=False,input_class=input_class)[0].data_columns[1][1]
# Champion Challenger for Binomial Model
# ALL THE COLUMNS OF THE INPUT RELATION MUST BE NUMERICAL
def champion_challenger_binomial(input_relation,response_column,predictor_columns,cursor,
fold_count=3,max_iterations=100,logit_optimizer='Newton',logit_regularization='None',
logit_alpha=0.5,rf_ntree=20,rf_mtry=None,rf_max_depth=5,rf_sampling_size=0.632):
if (not(isinstance(input_relation,str))):
raise TypeError("The parameter 'input_relation' must be a varchar")
if (not(isinstance(response_column,str))):
raise TypeError("The parameter 'response_column' must be a varchar")
if (not(isinstance(predictor_columns,list))):
raise TypeError("The parameter 'predictor_columns' must be a list of varchar")
else:
for column in predictor_columns:
if (not(isinstance(column,str))):
raise TypeError("The parameter 'predictor_columns' must be a list of varchar")
if (not(isinstance(fold_count,int)) or (fold_count<0)):
raise TypeError("The parameter 'fold_count' must be a positive integer")
if (not(isinstance(max_iterations,int)) or (max_iterations<0)):
raise TypeError("The parameter 'max_iterations' must be a positive integer")
if not(logit_optimizer in ["Newton","BFGS"]):
raise TypeError("The parameter 'logit_optimizer' must be in Newton|BFGS")
if not(logit_regularization in ["L2","None"]):
raise TypeError("The parameter 'logit_regularization' must be in L2|None")
if (not(isinstance(logit_alpha,float)) or (logit_alpha<0) or (logit_alpha>1)):
raise TypeError("The parameter 'logit_alpha' must be in [0,1]")
if (not(isinstance(rf_ntree,int)) or (rf_ntree<0)):
raise TypeError("The parameter 'rf_ntree' must be a positive integer")
if (not(isinstance(rf_max_depth,int)) or (rf_max_depth<0)):
raise TypeError("The parameter 'rf_max_depth' must be a positive integer")
if (not(isinstance(rf_sampling_size,float)) or (rf_sampling_size<0) or (rf_sampling_size>1)):
raise TypeError("The parameter 'rf_sampling_size' must be in [0,1]")
colors=['dodgerblue','seagreen','indianred','gold']
temp_rvd=rvd.RVD(input_relation,cursor)
test_name=input_relation+"_cc_binomial_test_"+str(np.random.randint(10000000))
train_name=input_relation+"_cc_binomial_train_"+str(np.random.randint(10000000))
all_logit_info=[[],[],[],[],['logistic_reg']]
all_svm_info=[[],[],[],[],['svm_classifier']]
all_nb_info=[[],[],[],[],['naive_bayes']]
all_rf_info=[[],[],[],[],['rf_classifier']]
for i in range(fold_count):
rvd.drop_table("random_vpython_table_"+input_relation,cursor,print_info=False)
temp_rvd.train_test_split(test_name=test_name,train_name=train_name,print_info=False)
# Logit
model_name="model_"+str(np.random.randint(10000000))
start_time=time.time()
logistic_reg(model_name=model_name,input_relation=train_name,response_column=response_column,
predictor_columns=predictor_columns,cursor=cursor,optimizer=logit_optimizer,
regularization=logit_regularization,alpha=logit_alpha,max_iterations=max_iterations)
all_logit_info[3]+=[time.time()-start_time]
logit=load_model(model_name,cursor,input_relation=test_name)
roc=logit.roc(show=False)
all_logit_info[0]+=[roc[0].data_columns[1][1]]
all_logit_info[1]+=[roc[1].data_columns]
error=logit.error_rate()
all_logit_info[2]+=[1-error.data_columns[1][-1]]
drop_model(model_name,cursor,print_info=False)
# SVM
model_name="model_"+str(np.random.randint(10000000))
start_time=time.time()
svm_classifier(model_name=model_name,input_relation=train_name,response_column=response_column,
predictor_columns=predictor_columns,cursor=cursor,max_iterations=max_iterations)
all_svm_info[3]+=[time.time()-start_time]
svm=load_model(model_name,cursor,input_relation=test_name)
roc=svm.roc(show=False)
all_svm_info[0]+=[roc[0].data_columns[1][1]]
all_svm_info[1]+=[roc[1].data_columns]
error=svm.error_rate()
all_svm_info[2]+=[1-error.data_columns[1][-1]]
drop_model(model_name,cursor,print_info=False)
# Naive Bayes
model_name="model_"+str(np.random.randint(10000000))
start_time=time.time()
naive_bayes(model_name=model_name,input_relation=train_name,response_column=response_column,
predictor_columns=predictor_columns,cursor=cursor)
all_nb_info[3]+=[time.time()-start_time]
svm=load_model(model_name,cursor,input_relation=test_name)
roc=svm.roc(show=False)
all_nb_info[0]+=[roc[0].data_columns[1][1]]
all_nb_info[1]+=[roc[1].data_columns]
error=svm.error_rate()
all_nb_info[2]+=[1-error.data_columns[1][-1]]
drop_model(model_name,cursor,print_info=False)
# Random Forest
model_name="model_"+str(np.random.randint(10000000))
start_time=time.time()
cursor.execute("create view "+train_name+"_rf as select "+", ".join(predictor_columns)+", "+response_column+"::varchar(1) from "+train_name)
rf_classifier(model_name=model_name,input_relation=train_name+"_rf",response_column=response_column,
predictor_columns=predictor_columns,cursor=cursor,ntree=rf_ntree,
mtry=rf_mtry,sampling_size=rf_sampling_size,max_depth=rf_max_depth)
all_rf_info[3]+=[time.time()-start_time]
rf=load_model(model_name,cursor,input_relation=test_name)
roc=rf.roc(show=False)
all_rf_info[0]+=[roc[0].data_columns[1][1]]
all_rf_info[1]+=[roc[1].data_columns]
error=rf.error_rate()
all_rf_info[2]+=[1-error.data_columns[1][-1]]
drop_model(model_name,cursor,print_info=False)
# End
rvd.drop_view(train_name+"_rf",cursor,print_info=False)
rvd.drop_view(test_name,cursor,print_info=False)
rvd.drop_view(train_name,cursor,print_info=False)
# DRAW THE METRICS
sample=list(range(fold_count))
# accuracy
plt.figure(figsize=(10,6))
plt.plot(sample,all_logit_info[2],label=all_logit_info[4][0]+" avg_accuracy="+str(np.mean(all_logit_info[2])),color=colors[0])
plt.plot(sample,all_svm_info[2],label=all_svm_info[4][0]+" avg_accuracy="+str(np.mean(all_svm_info[2])),color=colors[1])
plt.plot(sample,all_nb_info[2],label=all_nb_info[4][0]+" avg_accuracy="+str(np.mean(all_nb_info[2])),color=colors[2])
plt.plot(sample,all_rf_info[2],label=all_rf_info[4][0]+" avg_accuracy="+str(np.mean(all_rf_info[2])),color=colors[3])
plt.ylabel('accuracy')
plt.xlabel('sample')
plt.legend()
plt.grid()
plt.xticks(sample,sample)
plt.title('Binomial Champion Challenger on ["'+input_relation+'" Dataset]')
plt.show()
# time
plt.figure(figsize=(10,6))
plt.plot(sample,all_logit_info[3],label=all_logit_info[4][0]+" avg_time="+str(np.mean(all_logit_info[3])),color=colors[0])
plt.plot(sample,all_svm_info[3],label=all_svm_info[4][0]+" avg_time="+str(np.mean(all_svm_info[3])),color=colors[1])
plt.plot(sample,all_nb_info[3],label=all_nb_info[4][0]+" avg_time="+str(np.mean(all_nb_info[3])),color=colors[2])
plt.plot(sample,all_rf_info[3],label=all_rf_info[4][0]+" avg_time="+str(np.mean(all_rf_info[3])),color=colors[3])
plt.ylabel('time')
plt.xlabel('sample')
plt.legend()
plt.grid()
plt.xticks(sample,sample)
plt.title('Binomial Champion Challenger on ["'+input_relation+'" Dataset]')
plt.show()
# auc
plt.figure(figsize=(10,6))
plt.plot(sample,all_logit_info[0],label=all_logit_info[4][0]+" avg_auc="+str(np.mean(all_logit_info[0])),color=colors[0])
plt.plot(sample,all_svm_info[0],label=all_svm_info[4][0]+" avg_auc="+str(np.mean(all_svm_info[0])),color=colors[1])
plt.plot(sample,all_nb_info[0],label=all_nb_info[4][0]+" avg_auc="+str(np.mean(all_nb_info[0])),color=colors[2])
plt.plot(sample,all_rf_info[0],label=all_rf_info[4][0]+" avg_auc="+str(np.mean(all_rf_info[0])),color=colors[3])
plt.ylabel('auc')
plt.xlabel('sample')
plt.legend()
plt.grid()
plt.xticks(sample,sample)
plt.title('Binomial Champion Challenger on ["'+input_relation+'" Dataset]')
plt.show()
return(rvd.column_matrix([['','logistic_reg','svm_classifier','naive_bayes','rf_classifier'],
['avg_time',np.mean(all_logit_info[3]),np.mean(all_svm_info[3]),np.mean(all_nb_info[3]),np.mean(all_rf_info[3])],
['avg_auc',np.mean(all_logit_info[0]),np.mean(all_svm_info[0]),np.mean(all_nb_info[0]),np.mean(all_rf_info[0])],
['avg_accuracy',np.mean(all_logit_info[2]),np.mean(all_svm_info[2]),np.mean(all_nb_info[2]),np.mean(all_rf_info[2])],
['std_accuracy',np.std(all_logit_info[2]),np.std(all_svm_info[2]),np.std(all_nb_info[2]),np.std(all_rf_info[2])]]))
# Return the Confusion Matrix of the model
def confusion_matrix(model,threshold=0.5,input_class=1):
if (input_class==None):
use_input_class=False
else:
use_input_class=True
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (not(isinstance(threshold,float)) or (threshold<0) or (threshold>1)):
raise TypeError("The parameter 'threshold' must be in [0,1]")
if ((model.category=="binomial") or (use_input_class and ((model.category=="multinomial") and ((input_class in model.classes) or (str(input_class) in model.classes))))):
query=("select confusion_matrix(obs, response using parameters num_classes=2) over() from (select (case when {}='{}' then 1 else 0 end) as obs, (case when predict_{}"+
"({} using parameters model_name='{}',type='probability',class='{}',match_by_pos='true')::float>{} then 1 else 0 end) as response from {}) x")
query=query.format(model.response_column,input_class,model.model_type,",".join(model.predictor_columns),model.model_name,
input_class,threshold,model.input_relation)
model.cursor.execute(query)
query_result=model.cursor.fetchall()
matrix=[['',0,1],[0,query_result[0][1],query_result[1][1]],[1,query_result[0][2],query_result[1][2]]]
if (model.model_type in ["naive_bayes","rf_classifier"]):
title=("Confusion Matrix with as positive '"+str(input_class)+"'")
else:
title=("Confusion Matrix")
return rvd.column_matrix(matrix,title=title)
elif (model.category=="multinomial"):
classes=model.classes
num_classes=str(len(classes))
query="select confusion_matrix(obs, response using parameters num_classes="+num_classes+") over() from (select decode("+model.response_column
for idx,item in enumerate(classes):
query+=",'"+str(item)+"',"+str(idx)
query+=") as obs, decode(predict_"+model.model_type+"("+",".join(model.predictor_columns)+" using parameters model_name='{}'".format(model.model_name)
query+=",type='response',match_by_pos='true')"
for idx,item in enumerate(classes):
query+=",'"+str(item)+"',"+str(idx)
query+=") as response from {}) x".format(model.input_relation)
model.cursor.execute(query)
query_result=model.cursor.fetchall()
matrix=[[['']]*(len(classes)+1)]
matrix[0]=['']+classes
for idx in range(1,len(query_result[0])-1):
matrix+=[[classes[idx-1]]+[item[idx] for item in query_result]]
return rvd.column_matrix(matrix)
else:
raise Exception("The Confusion Matrix is only available for multinomial/binomial models.")
# Return all the details of the model
def details(model_name,cursor,attr_name="details",model_type=None,header=None):
if (not(isinstance(model_name,str))):
raise TypeError("The parameter 'model_name' must be a varchar")
if (not(isinstance(attr_name,str))):
raise TypeError("The parameter 'attr_name' must be a varchar")
query="select get_model_attribute(using parameters model_name='{}',attr_name='{}')".format(model_name,attr_name)
cursor.execute(query)
query_result=cursor.fetchall()
if (attr_name=="prior"):
header=['class','value']
elif (model_type=="rf"):
header=['column','type']
else:
header=['','coefficient','std_error','t_value','p_value']
columns=[]
for i in range(len(query_result[0])):
columns+=[[item[i] for item in query_result]]
for i in range(len(columns)):
columns[i]=[header[i]]+columns[i]
return rvd.column_matrix(columns,first_element=header[0])
# Drop Model if it exists
def drop_model(model_name,cursor,print_info=True):
cursor.execute("select 1;")
try:
query="drop model {};".format(model_name)
cursor.execute(query)
if (print_info):
print("The model {} was successfully dropped.".format(model_name))
except:
print("/!\\ Warning: The model {} doesn't exist !".format(model_name))
# Draw the Elbow curve: Help to find the best K for Kmeans
def elbow(input_relation,input_columns,cursor,min_num_cluster=1,max_num_cluster=15,max_iterations=10,
epsilon=1e-4,init_method="kmeanspp",print_each_round=False):
if (not(isinstance(input_columns,list))):
raise TypeError("The parameter 'input_columns' must be a list of varchar")
else:
for column in input_columns:
if (not(isinstance(column,str))):
raise TypeError("The parameter 'input_columns' must be a list of varchar")
if (not(isinstance(max_num_cluster,int)) or (max_num_cluster<0)):
raise TypeError("The parameter 'max_num_cluster' must be a positive integer")
if (not(isinstance(max_iterations,int)) or (max_iterations<0)):
raise TypeError("The parameter 'max_iterations' must be a positive integer")
if (not(isinstance(epsilon,float)) or (epsilon<0)):
raise TypeError("The parameter 'epsilon' must be a positive float")
if (not(isinstance(input_relation,str))):
raise TypeError("The parameter 'input_relation' must be a varchar")
all_within_cluster_SS=[]
for i in range(min_num_cluster,max_num_cluster):
if (print_each_round):
print("Round "+str(i)+" begins")
name="_vpython_kmeans_"+str(np.random.randint(1000000))
query="drop model if exists {};".format(name)
cursor.execute(query)
all_within_cluster_SS+=[kmeans(name,input_relation,input_columns,i,cursor,
max_iterations=max_iterations,epsilon=epsilon,init_method=init_method).within_cluster_SS()]
if (print_each_round):
print("Round "+str(i)+" ends")
query="drop model if exists {};".format(name)
cursor.execute(query)
num_clusters=range(min_num_cluster,max_num_cluster)
plt.rcParams['axes.facecolor']='#F4F4F4'
plt.grid()
plt.plot(num_clusters,all_within_cluster_SS,marker="s",color="dodgerblue")
plt.title("Elbow Curve")
plt.xlabel('Number of Clusters')
plt.ylabel('Within-Cluster SS')
plt.subplots_adjust(left=0.2)
plt.show()
return rvd.column_matrix([['num_clusters']+list(num_clusters),['all_within_cluster_SS']+all_within_cluster_SS],first_element='num_clusters')
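# Illustrative usage sketch (added example; the 'iris' relation and column names
# are hypothetical, and any DB-API cursor connected to Vertica can be passed in):
def _example_elbow(cursor):
    return elbow("iris", ["petal_length", "petal_width"], cursor,
                 min_num_cluster=1, max_num_cluster=10)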
# Return the Error Rate
def error_rate(model,threshold=0.5,input_class=1):
if (input_class==None):
use_input_class=False
else:
use_input_class=True
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (not(isinstance(threshold,float)) or (threshold<0) or (threshold>1)):
raise TypeError("The parameter 'threshold' must be in [0,1]")
if ((model.category=="binomial") or (use_input_class and ((model.category=="multinomial") and ((input_class in model.classes) or (str(input_class) in model.classes))))):
query=("select error_rate(obs, response using parameters num_classes=2) over() from (select (case when {}='{}' then 1 else 0 end) as obs, (case when predict_{}"+
"({} using parameters model_name='{}',type='probability',class='{}',match_by_pos='true')::float>{} then 1 else 0 end) as response from {}) x")
query=query.format(model.response_column,input_class,model.model_type,",".join(model.predictor_columns),model.model_name,
input_class,threshold,model.input_relation)
model.cursor.execute(query)
query_result=model.cursor.fetchall()
matrix=[['',0,1,'total'],['error_rate',query_result[0][1],query_result[1][1],query_result[2][1]]]
if (model.model_type in ["naive_bayes","rf_classifier"]):
title=("Error Rate Table with as positive '"+str(input_class)+"'")
else:
title=("Error Rate")
return rvd.column_matrix(matrix,title=title)
elif (model.category=="multinomial"):
classes=model.classes
num_classes=str(len(classes))
query="select error_rate(obs, response using parameters num_classes="+num_classes+") over() from (select decode("+model.response_column
for idx,item in enumerate(classes):
query+=",'"+str(item)+"',"+str(idx)
query+=") as obs, decode(predict_"+model.model_type+"("+",".join(model.predictor_columns)+" using parameters model_name='{}',match_by_pos='true'".format(model.model_name)
query+=",type='response')"
for idx,item in enumerate(classes):
query+=",'"+str(item)+"',"+str(idx)
query+=") as response from {}) x".format(model.input_relation)
model.cursor.execute(query)
query_result=model.cursor.fetchall()
matrix=[['']+classes+['total'],['error_rate']+[item[1] for item in query_result]]
return rvd.column_matrix(matrix)
else:
raise Exception("The Error Rate is only available for multinomial/binomial models")
# Return the Features Importance
def features_importance(model,show=True,with_intercept=False):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes,kmeans)))):
raise TypeError("This function is not available with this model")
if (not(isinstance(show,bool))):
raise TypeError("The parameter 'show' must be a bool")
if (not(isinstance(with_intercept,bool))):
raise TypeError("The parameter 'with_intercept' must be a bool")
if (model.model_type in ["linear_reg","svm_regressor","svm_classifier","logistic_reg"]):
columns=model.details().data_columns
if (type(model.all_importances)!=list):
coefficients=columns[0]
all_importances=columns[1]
del coefficients[0]
del all_importances[0]
del coefficients[0]
query=[]
for item in coefficients:
query+=["avg({}),stddev({})".format(item,item)]
query="select "+",".join(query)+" from "+model.input_relation
model.cursor.execute(query)
avg_std=model.cursor.fetchone()
all_avg=[]
all_std=[]
for i in range(len(avg_std)):
if ((i%2)==0):
all_avg+=[avg_std[i]]
else:
all_std+=[avg_std[i]]
coefficients=['Intercept']+coefficients
all_importances[0]+=sum([all_avg[i]*columns[1][i+1] for i in range(len(all_avg))])
for i in range(1,len(all_importances)):
all_importances[i]=all_importances[i]*all_std[i-1]
if not(with_intercept):
del all_importances[0]
del coefficients[0]
all_sign=[item>0 for item in all_importances]
all_importances=[abs(item) for item in all_importances]
all_importances=[item/sum(all_importances) for item in all_importances]
model.all_importances=[coefficients,all_importances,all_sign]
else:
coefficients=model.all_importances[0]
all_importances=model.all_importances[1]
all_sign=model.all_importances[2]
elif (model.model_type=="kmeans"):
query=[]
if (type(model.all_importances)!=list):
try:
for item in model.input_columns:
for i in range(0,model.num_clusters):
query_item="corr((case when apply_kmeans({} using parameters model_name='{}', match_by_pos='True') = {} then 1 else 0 end),{})"
query_item=query_item.format(",".join(model.input_columns),model.model_name,i,item)
query+=[query_item]
query="select "+",".join(query)+" from "+model.input_relation
model.cursor.execute(query)
query_result=model.cursor.fetchone()
except:
query_result=[]
for item in model.input_columns:
for i in range(0,model.num_clusters):
query_item="corr((case when apply_kmeans({} using parameters model_name='{}', match_by_pos='True') = {} then 1 else 0 end),{})"
query_item=query_item.format(",".join(model.input_columns),model.model_name,i,item)
query="select "+query_item+" from "+model.input_relation
model.cursor.execute(query)
query_result+=[model.cursor.fetchone()[0]]
all_importances=[]
k=0
importance=0
all_sign=[]
for item in query_result:
if (k==model.num_clusters):
all_importances+=[importance]
importance=0
k=0
k+=1
all_sign+=[item>0]
importance+=abs(item)
all_importances=all_importances+[importance]
all_importances=[item/sum(all_importances) for item in all_importances]
model.all_importances=[all_importances,all_sign]
else:
all_importances=model.all_importances[0]
all_sign=model.all_importances[1]
coefficients=model.input_columns
else:
raise Exception("The features_importance function is not yet implemented for '{}' model.".format(model.model_type))
all_importances,coefficients,all_sign=zip(*sorted(zip(all_importances,coefficients,all_sign)))
coefficients=[item for item in coefficients]
all_importances=[item for item in all_importances]
all_sign=[item for item in all_sign]
if (show):
plt.figure(figsize=(7,5))
plt.rcParams['axes.facecolor']='#F5F5F5'
color=[]
for item in all_sign:
if (item):
color+=['dodgerblue']
else:
color+=['mediumseagreen']
plt.barh(range(0,len(all_importances)),all_importances,0.9,color=color,alpha=0.86)
green=mpatches.Patch(color='mediumseagreen', label='sign -')
blue=mpatches.Patch(color='dodgerblue', label='sign +')
plt.legend(handles=[green,blue],loc="lower right")
plt.ylabel("Features")
plt.xlabel("Importance")
plt.title("Model {}: '{}'".format(model.model_type,model.model_name))
plt.gca().xaxis.grid()
plt.gca().set_axisbelow(True)
plt.yticks(range(0,len(all_importances)),coefficients)
plt.show()
return rvd.column_matrix([[""]+coefficients,['Importance']+all_importances])
# Return the Lift table
def lift_table(model,num_bins=200,color=["dodgerblue","#444444"],show=True,input_class=1):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (not(isinstance(num_bins,int)) or (num_bins<0)):
raise TypeError("The parameter 'num_bins' must be a positive integer")
if (not(isinstance(show,bool))):
raise TypeError("The parameter 'show' must be a bool")
if ((model.category=="binomial") or ((model.category=="multinomial") and ((input_class in model.classes) or (str(input_class) in model.classes)))):
query=("select lift_table(obs, prob using parameters num_bins={}) over() from (select (case when {}='{}' then 1 else 0 end) as obs, predict_{}"+
"({} using parameters model_name='{}',type='probability',class='{}',match_by_pos='true')::float as prob from {}) as prediction_output")
query=query.format(num_bins,model.response_column,input_class,model.model_type,",".join(model.predictor_columns),model.model_name,
input_class,model.input_relation)
model.cursor.execute(query)
query_result=model.cursor.fetchall()
decision_boundary=[item[0] for item in query_result]
positive_prediction_ratio=[item[1] for item in query_result]
lift=[item[2] for item in query_result]
decision_boundary.reverse()
if (show):
plt.figure(figsize=(7,5),facecolor='white')
plt.rcParams['axes.facecolor']='#F5F5F5'
plt.xlabel('Cumulative Data Fraction')
plt.plot(decision_boundary,lift,color=color[0])
plt.plot(decision_boundary,positive_prediction_ratio,color=color[1])
if (model.category=="multinomial"):
plt.title(model.model_name+" Lift Table of class '{}'".format(input_class))
else:
plt.title(model.model_name+" Lift Table")
plt.gca().set_axisbelow(True)
plt.grid()
color1=mpatches.Patch(color=color[0], label='Cumulative Lift')
color2=mpatches.Patch(color=color[1], label='Cumulative Capture Rate')
plt.legend(handles=[color1,color2])
plt.show()
return rvd.column_matrix([['decision_boundary']+decision_boundary,['positive_prediction_ratio']+positive_prediction_ratio,['lift']+lift],repeat_first_column=False)
else:
raise Exception("The Lift Table is only available for multinomial/binomial models with a correct class.")
# Load the Model
def load_model(model_name,cursor,input_relation=None):
if (not(isinstance(model_name,str))):
raise TypeError("The parameter 'model_name' must be a varchar")
cursor.execute("select model_type from models where model_name='"+model_name+"'")
model_type=cursor.fetchone()[0].lower()
if (model_type=="kmeans"):
query="select attr_fields from (select get_model_attribute(using parameters model_name='{}')) x limit 1;"
query=query.format(model_name)
cursor.execute(query)
input_columns=cursor.fetchone()[0]
input_columns=input_columns.split(", ")
num_clusters=len(input_columns)
if (type(input_relation)!=str):
summarize=summarize_model(model_name,cursor).lower()
input_relation=summarize.split("kmeans(")[1].split(",")[1].split("'")[1]
return kmeans(cursor,model_name,input_relation,input_columns,num_clusters,load=True)
elif (model_type=="cross_validation"):
return cross_validate("","","","",cursor,model_name=model_name,load=True)
else:
query="select predictor from (select get_model_attribute(using parameters model_name='{}',attr_name='details')) x;"
query=query.format(model_name)
cursor.execute(query)
predictor_columns=cursor.fetchall()
predictor_columns=[item for sublist in predictor_columns for item in sublist]
if (model_type in ["linear_regression","svm_regressor","svm_classifier","logistic_regression","naive_bayes"]):
predictor_columns=predictor_columns[1:len(predictor_columns)]
query="select get_model_attribute(using parameters model_name='{}',attr_name='call_string');"
query=query.format(model_name)
cursor.execute(query)
query_result=cursor.fetchone()[0]
response_column=query_result.split(',')[2].replace("'","").replace('"',"").replace(' ',"")
if (type(input_relation)!=str):
input_relation=query_result.split(',')[1].replace("'","").replace('"',"").replace(' ',"")
if (model_type=="linear_regression"):
return linear_reg(model_name,input_relation,response_column,predictor_columns,cursor,load=True)
elif (model_type=="svm_regressor"):
return svm_regressor(model_name,input_relation,response_column,predictor_columns,cursor,load=True)
elif (model_type=="svm_classifier"):
return svm_classifier(model_name,input_relation,response_column,predictor_columns,cursor,load=True)
elif (model_type=="naive_bayes"):
return naive_bayes(model_name,input_relation,response_column,predictor_columns,cursor,load=True)
elif (model_type=="logistic_regression"):
return logistic_reg(model_name,input_relation,response_column,predictor_columns,cursor,load=True)
elif (model_type=="rf_classifier"):
return rf_classifier(model_name,input_relation,response_column,predictor_columns,cursor,load=True)
elif (model_type=="rf_regressor"):
return rf_regressor(model_name,input_relation,response_column,predictor_columns,cursor,load=True)
else:
raise Exception("The model '{}' is not took in charge.".format(model_type))
# return the Log loss for multinomial model
def logloss(model):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if ((model.category in ["binomial","multinomial"])):
if (model.model_type in ["svm_classifier","logistic_reg"]):
query=("select avg(case when {}=1 then -log(predict_{}({} using parameters model_name='{}',type='probability',class='1',match_by_pos='true')::float+0.000001)"+
" else -log(1-predict_{}({} using parameters model_name='{}',type='probability',class='1',match_by_pos='true')::float+0.000001) end) from {};")
query=query.format(model.response_column,model.model_type,",".join(model.predictor_columns),model.model_name,model.model_type,
",".join(model.predictor_columns),model.model_name,model.input_relation)
model.cursor.execute(query)
logloss_value=model.cursor.fetchone()[0]
else:
logloss_value=0
for current_class in model.classes:
query=("select avg(case when {}='{}' then -log(predict_{}({} using parameters model_name='{}',type='probability',class='{}',match_by_pos='true')::float+0.000001)"+
" else -log(1-predict_{}({} using parameters model_name='{}',type='probability',class='{}',match_by_pos='true')::float+0.000001) end) from {};")
query=query.format(model.response_column,current_class,model.model_type,",".join(model.predictor_columns),model.model_name,current_class,model.model_type,
",".join(model.predictor_columns),model.model_name,current_class,model.input_relation)
model.cursor.execute(query)
logloss_value+=model.cursor.fetchone()[0]
logloss_value=logloss_value/len(model.classes)
model.logloss_value=logloss_value
return logloss_value
else:
raise Exception("The logloss is only available for multinomial/binomial models")
# Return the ntree metric curve for a rf_classifier algorithm
def metric_rf_curve_ntree(input_relation,test_relation,response_column,predictor_columns,cursor,
mode='logloss',ntree_begin=1,ntree_end=20,mtry=None,sampling_size=0.632,
max_depth=5,max_breadth=32,min_leaf_size=5,min_info_gain=0.0,nbins=32,
test_only=True):
if (not(isinstance(predictor_columns,list))):
raise TypeError("The parameter 'predictor_columns' must be a list of varchar")
else:
for column in predictor_columns:
if (not(isinstance(column,str))):
raise TypeError("The parameter 'predictor_columns' must be a list of varchar")
if (not(isinstance(input_relation,str))):
raise TypeError("The parameter 'input_relation' must be a varchar")
if (not(isinstance(test_relation,str))):
raise TypeError("The parameter 'test_relation' must be a varchar")
if (not(isinstance(response_column,str))):
raise TypeError("The parameter 'response_column' must be a varchar")
if (not(isinstance(ntree_begin,int)) or (ntree_begin<0)):
raise TypeError("The parameter 'ntree_begin' must be a positive integer")
if (not(isinstance(ntree_end,int)) or (ntree_end<0)):
raise TypeError("The parameter 'ntree_end' must be a positive integer")
if (not(isinstance(max_depth,int)) or (max_depth<0)):
raise TypeError("The parameter 'max_depth' must be a positive integer")
if (not(isinstance(max_breadth,int)) or (max_breadth<0)):
raise TypeError("The parameter 'max_breadth' must be a positive integer")
if (not(isinstance(min_leaf_size,int)) or (min_leaf_size<0)):
raise TypeError("The parameter 'min_leaf_size' must be a positive integer")
if (not(isinstance(nbins,int)) or (nbins<0)):
raise TypeError("The parameter 'nbins' must be a positive integer")
if (not(isinstance(sampling_size,float)) or (sampling_size<0) or (sampling_size>1)):
raise TypeError("The parameter 'sampling_size' must be in [0,1]")
if (not(isinstance(min_info_gain,float)) or (min_info_gain<0)):
raise TypeError("The parameter 'min_info_gain' must be a positive float")
marker='s'
if (ntree_end-ntree_begin>20):
marker=None
all_error_test=[]
all_error_train=[]
if (mode not in ['logloss','accuracy','error_rate','auc']):
raise TypeError("Mode must be in logloss|accuracy|error_rate|auc.")
for i in range(ntree_begin,ntree_end+1):
name="_vpython_error_"+str(np.random.randint(1000000))
query="drop model if exists {};".format(name)
cursor.execute(query)
rf_classifier(name,input_relation,response_column,predictor_columns,cursor,ntree=i,
mtry=mtry,sampling_size=sampling_size,max_depth=max_depth,max_breadth=max_breadth,min_leaf_size=min_leaf_size,
min_info_gain=min_info_gain,nbins=nbins)
model=load_model(name,cursor,test_relation)
if (mode=='logloss'):
all_error_test+=[model.logloss()]
elif (mode=='error_rate'):
all_error_test+=[model.error_rate().data_columns[1][3]]
elif (mode=='accuracy'):
all_error_test+=[1-model.error_rate().data_columns[1][3]]
elif (mode=='auc'):
all_error_test+=[model.roc(show=False)[0].data_columns[1][1]]
model=load_model(name,cursor,input_relation)
if (mode=='logloss'):
all_error_train+=[model.logloss()]
elif (mode=='error_rate'):
all_error_train+=[model.error_rate().data_columns[1][3]]
elif (mode=='accuracy'):
all_error_train+=[1-model.error_rate().data_columns[1][3]]
elif (mode=='auc'):
all_error_train+=[model.roc(show=False)[0].data_columns[1][1]]
query="drop model if exists {};".format(name)
cursor.execute(query)
ntrees=range(ntree_begin,ntree_end+1)
plt.rcParams['axes.facecolor']='#F4F4F4'
plt.grid()
plt.plot(ntrees,all_error_test,marker=marker,color='dodgerblue')
if not(test_only):
plt.plot(ntrees,all_error_train,marker=marker,color='mediumseagreen')
plt.title(mode+" curve")
plt.xlabel('ntree')
plt.ylabel(mode)
if not(test_only):
green=mpatches.Patch(color='mediumseagreen', label='train')
blue=mpatches.Patch(color='dodgerblue', label='test')
plt.legend(handles=[green,blue])
plt.xlim(ntree_begin,ntree_end)
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
plt.subplots_adjust(left=0.2)
plt.show()
return (rvd.column_matrix([["ntree"]+list(ntrees),[mode+"_test"]+all_error_test],first_element="ntree"),
rvd.column_matrix([["ntree"]+list(ntrees),[mode+"_train"]+all_error_train],first_element="ntree"))
# Return the depth metric curve for the rf_classifier algorithm
def metric_rf_curve_depth(input_relation,test_relation,response_column,predictor_columns,cursor,mode='logloss',
ntree=20,mtry=None,sampling_size=0.632,max_depth_begin=1,max_depth_end=12,
max_breadth=32,min_leaf_size=5,min_info_gain=0.0,nbins=32,test_only=True):
if (not(isinstance(predictor_columns,list))):
raise TypeError("The parameter 'predictor_columns' must be a list of varchar")
else:
for column in predictor_columns:
if (not(isinstance(column,str))):
raise TypeError("The parameter 'predictor_columns' must be a list of varchar")
if (not(isinstance(input_relation,str))):
raise TypeError("The parameter 'input_relation' must be a varchar")
if (not(isinstance(test_relation,str))):
raise TypeError("The parameter 'test_relation' must be a varchar")
if (not(isinstance(response_column,str))):
raise TypeError("The parameter 'response_column' must be a varchar")
if (not(isinstance(ntree,int)) or (ntree<0)):
raise TypeError("The parameter 'ntree' must be a positive integer")
if (not(isinstance(max_depth_begin,int)) or (max_depth_begin<0)):
raise TypeError("The parameter 'max_depth_begin' must be a positive integer")
if (not(isinstance(max_depth_end,int)) or (max_depth_end<0)):
raise TypeError("The parameter 'max_depth_end' must be a positive integer")
if (not(isinstance(max_breadth,int)) or (max_breadth<0)):
raise TypeError("The parameter 'max_breadth' must be a positive integer")
if (not(isinstance(min_leaf_size,int)) or (min_leaf_size<0)):
raise TypeError("The parameter 'min_leaf_size' must be a positive integer")
if (not(isinstance(nbins,int)) or (nbins<0)):
raise TypeError("The parameter 'nbins' must be a positive integer")
if (not(isinstance(sampling_size,float)) or (sampling_size<0) or (sampling_size>1)):
raise TypeError("The parameter 'sampling_size' must be in [0,1]")
if (not(isinstance(min_info_gain,float)) or (min_info_gain<0)):
raise TypeError("The parameter 'min_info_gain' must be a positive float")
marker='s'
if (max_depth_end-max_depth_begin>20):
marker=None
all_error_test=[]
all_error_train=[]
if (mode not in ['logloss','accuracy','error_rate','auc']):
raise TypeError("Mode must be in logloss|accuracy|error_rate|auc.")
for i in range(max_depth_begin,max_depth_end+1):
name="_vpython_error_"+str(np.random.randint(1000000))
query="drop model if exists {};".format(name)
cursor.execute(query)
model=rf_classifier(name,input_relation,response_column,predictor_columns,cursor,ntree=ntree,
mtry=mtry,sampling_size=sampling_size,max_depth=i,max_breadth=max_breadth,min_leaf_size=min_leaf_size,
min_info_gain=min_info_gain,nbins=nbins)
model=load_model(name,cursor,test_relation)
if (mode=='logloss'):
all_error_test+=[model.logloss()]
elif (mode=='error_rate'):
all_error_test+=[model.error_rate().data_columns[1][3]]
elif (mode=='accuracy'):
all_error_test+=[1-model.error_rate().data_columns[1][3]]
elif (mode=='auc'):
all_error_test+=[model.roc(show=False)[0].data_columns[1][1]]
model=load_model(name,cursor,input_relation)
if (mode=='logloss'):
all_error_train+=[model.logloss()]
elif (mode=='error_rate'):
all_error_train+=[model.error_rate().data_columns[1][3]]
elif (mode=='accuracy'):
all_error_train+=[1-model.error_rate().data_columns[1][3]]
elif (mode=='auc'):
all_error_train+=[model.roc(show=False)[0].data_columns[1][1]]
query="drop model if exists {};".format(name)
cursor.execute(query)
max_depth=range(max_depth_begin,max_depth_end+1)
plt.rcParams['axes.facecolor']='#F4F4F4'
plt.grid()
plt.plot(max_depth,all_error_test,marker=marker,color='dodgerblue')
if not(test_only):
plt.plot(max_depth,all_error_train,marker=marker,color='mediumseagreen')
plt.title(mode+" curve")
plt.xlabel('max_depth')
plt.ylabel(mode)
if not(test_only):
green=mpatches.Patch(color='mediumseagreen', label='train')
blue=mpatches.Patch(color='dodgerblue', label='test')
plt.legend(handles=[green,blue])
plt.xlim(max_depth_begin,max_depth_end)
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
plt.subplots_adjust(left=0.2)
plt.show()
return (rvd.column_matrix([["max_depth"]+list(max_depth),[mode+"_test"]+all_error_test],first_element="max_depth"),
rvd.column_matrix([["max_depth"]+list(max_depth),[mode+"_train"]+all_error_train],first_element="max_depth"))
# Return the mse of the model for a reg model
def mse(model):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (model.mse_val==None):
query=("select mse(obs,prediction) over () from (select "+model.response_column+
" as obs, predict_"+model.model_type+"("+",".join(model.predictor_columns)+" using parameters "+
"model_name='"+model.model_name+"',match_by_pos='true') as prediction from "+model.input_relation+") x;")
model.cursor.execute(query)
model.mse_val=model.cursor.fetchone()[0]
return abs(model.mse_val)
# Return the value of the requested model parameter: use '*' to see them all
def parameter_value(model,parameter_name):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (parameter_name in ["regularization","rejected_row_count","accepted_row_count","iteration_count","alpha","tree_count"]):
query="select get_model_attribute(using parameters model_name='{}',attr_name='{}')".format(model.model_name,parameter_name)
model.cursor.execute(query)
return model.cursor.fetchone()[0]
elif ((parameter_name=="l") or (parameter_name=="lambda")):
query="select get_model_attribute(using parameters model_name='{}',attr_name='regularization')".format(model.model_name,parameter_name)
model.cursor.execute(query)
return model.cursor.fetchone()[1]
else:
print("/!\\ Warning: The parameter doesn't exist.")
# Plot for regressions
def plot_reg(model,color=None,projection=None,max_nb_points=1000,show=True):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (not(isinstance(max_nb_points,int)) or (max_nb_points<0)):
raise TypeError("The parameter 'max_nb_points' must be a positive integer")
if (not(isinstance(show,bool))):
raise TypeError("The parameter 'show' must be a varchar")
if (model.category=="regression"):
coefficients=model.details().data_columns
a0=float(coefficients[1][1])
if (type(projection)==list):
if (len(projection)>len(model.predictor_columns)):
return model.plot(max_nb_points=max_nb_points,show=show)
else:
idx=coefficients[0].index(projection[0])
a1=coefficients[1][idx]
if (len(projection)>1):
idx=coefficients[0].index(projection[1])
a2=coefficients[1][idx]
else:
a1=float(coefficients[1][2])
if (len(coefficients[1]))>=4:
a2=float(coefficients[1][3])
projection=model.predictor_columns
if (len(projection)==1):
if ((type(color)!=list) or (len(color)!=2)):
color=["dodgerblue","black"]
query="select {},{},random() from {} where {} is not null and {} is not null order by 3 limit {}".format(projection[0],
model.response_column,model.input_relation,projection[0],model.response_column,max_nb_points)
model.cursor.execute(query)
all_points=model.cursor.fetchall()
column1=[float(item[0]) for item in all_points]
column2=[float(item[1]) for item in all_points]
min_r=float(min(column1))
max_r=float(max(column1))
if (show):
plt.figure(figsize=(7,5),facecolor='white')
plt.gca().grid()
plt.gca().set_axisbelow(True)
plt.scatter(column1,column2,color=color[0],s=15)
plt.plot([min_r,max_r],[a0+a1*min_r,a0+a1*max_r],color=color[1])
plt.xlabel(projection[0])
plt.ylabel(model.response_column)
plt.title(model.model_type+': '+model.response_column+'='+str(round(a0,3))+"+("+str(round(a1,3))+")*"+projection[0])
plt.show()
return [column1,column2,[a0,a1]]
elif (len(projection)==2):
if ((type(color)!=list) or (len(color)!=2)):
color=["dodgerblue","gray"]
query="select {},{},{},random() from {} where {} is not null and {} is not null and {} is not null order by 3 limit {}".format(
projection[0],projection[1],model.response_column,model.input_relation,projection[0],projection[1],model.response_column,max_nb_points)
model.cursor.execute(query)
all_points=model.cursor.fetchall()
column1=[float(item[0]) for item in all_points]
column2=[float(item[1]) for item in all_points]
column3=[float(item[2]) for item in all_points]
min_r1=float(min(column1))
max_r1=float(max(column1))
min_r2=float(min(column2))
max_r2=float(max(column2))
if (show):
fig=plt.figure(figsize=(7,5),facecolor='white')
ax=fig.add_subplot(111,projection='3d')
X=np.arange(min_r1,max_r1,(max_r1-min_r1)/5.0)
Y=np.arange(min_r2,max_r2,(max_r2-min_r2)/5.0)
X,Y=np.meshgrid(X, Y)
Z=a0+a1*X+a2*Y
ax.scatter(column1,column2,column3,color=color[0],s=15)
ax.plot_surface(X,Y,Z, rstride=1, cstride=1, alpha=0.8,color=color[1])
ax.set_xlabel(projection[0])
ax.set_ylabel(projection[1])
ax.set_zlabel(model.response_column)
plt.title(model.model_type+': '+model.response_column+'='+str(round(a0,3))+"+("+str(round(a1,3))+
")*"+projection[0]+"+("+str(round(a2,3))+")*"+projection[1])
plt.show()
else:
print("/!\\ Warning: The dimension is too big.")
print("Please use the 'projection' parameter to see a projection of your figure")
else:
raise Exception("The model must be a regression to use this function")
# Return model metrics for a reg model
def reg_metrics(model):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
mse_val=model.mse()
rsquared_val=model.rsquared()
return rvd.column_matrix([['','mse','rsquared'],['value',mse_val,rsquared_val]])
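# A minimal usage sketch for the regression metric helpers: 'model' is assumed to be any
# already-fitted regression object defined in this module (linear_reg, svm_regressor or
# rf_regressor); nothing here is created, the object is a hypothetical input.
def _example_reg_metrics(model):
    # mse and rsquared are cached on model.mse_val / model.rsquared_val after the first call
    return reg_metrics(model),mse(model),rsquared(model)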
# Plot the ROC curve
def roc(model,num_bins=1000,color=["dodgerblue","#444444"],show=True,input_class=1):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (not(isinstance(num_bins,int)) or (num_bins<0)):
raise TypeError("The parameter 'num_bins' must be a positive integer")
if (not(isinstance(show,bool))):
raise TypeError("The parameter 'show' must be a varchar")
if ((model.category=="binomial") or ((model.category=="multinomial") and ((input_class in model.classes) or (str(input_class) in model.classes)))):
query=("select roc(obs, prob using parameters num_bins={}) over() from (select (case when {}='{}' then 1 else 0 end) as obs, predict_{}"+
"({} using parameters model_name='{}',type='probability',class='{}',match_by_pos='true')::float as prob from {}) as prediction_output")
query=query.format(num_bins,model.response_column,input_class,model.model_type,",".join(model.predictor_columns),model.model_name,
input_class,model.input_relation)
model.cursor.execute(query)
query_result=model.cursor.fetchall()
threshold=[item[0] for item in query_result]
false_positive=[item[1] for item in query_result]
true_positive=[item[2] for item in query_result]
auc=0
for i in range(len(false_positive)-1):
if (false_positive[i+1]-false_positive[i]!=0.0):
a=(true_positive[i+1]-true_positive[i])/(false_positive[i+1]-false_positive[i])
b=true_positive[i+1]-a*false_positive[i+1]
auc=auc+a*(false_positive[i+1]*false_positive[i+1]-false_positive[i]*false_positive[i])/2+b*(false_positive[i+1]-false_positive[i]);
auc=-auc
best_threshold_arg=np.argmax([abs(y-x) for x,y in zip(false_positive,true_positive)])
best_threshold=threshold[best_threshold_arg]
if (model.model_type!="svm_classifier"):
metrics=rvd.column_matrix([['','auc','best_threshold'],['value',auc,best_threshold]])
if (show):
plt.figure(figsize=(7,5),facecolor='white')
plt.rcParams['axes.facecolor']='#F5F5F5'
plt.xlabel('False Positive Rate (1-Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.plot(false_positive,true_positive,color=color[0])
plt.plot([0,1],[0,1],color=color[1])
plt.ylim(0,1)
plt.xlim(0,1)
if (model.category=="multinomial"):
plt.title(model.model_name+" ROC Curve of class '{}'\nAUC=".format(input_class)+str(auc))
else:
plt.title(model.model_name+" ROC Curve\nAUC="+str(auc))
plt.gca().set_axisbelow(True)
plt.grid()
plt.show()
if (model.model_type!="svm_classifier"):
return metrics,rvd.column_matrix([['threshold']+threshold,['false_positive']+false_positive,['true_positive']+true_positive],repeat_first_column=False)
else:
return rvd.column_matrix([['','auc'],['value',auc]]),rvd.column_matrix([['threshold']+threshold,['false_positive']+false_positive,['true_positive']+true_positive],repeat_first_column=False)
else:
raise Exception("The ROC Curve is only available for multinomial/binomial models with a correct class.")
# Return the rsquared of the model for a reg model
def rsquared(model):
if (not(isinstance(model,(rf_classifier,rf_regressor,svm_classifier,svm_regressor,linear_reg,logistic_reg,naive_bayes)))):
raise TypeError("This function is not available with this model")
if (model.rsquared_val==None):
query=("select rsquared(obs,prediction) over () from (select "+model.response_column+
" as obs, predict_"+model.model_type+"("+",".join(model.predictor_columns)+" using parameters "+
"model_name='"+model.model_name+"',match_by_pos='true') as prediction from "+model.input_relation+") x;")
model.cursor.execute(query)
model.rsquared_val=model.cursor.fetchone()[0]
return abs(model.rsquared_val)
# Summarize the Model
def summarize_model(model_name,cursor):
query="select summarize_model('"+model_name+"')"
cursor.execute(query)
query_result=cursor.fetchone()[0]
return query_result
# Print Tree Id n for a RF
def tree(model,n=0):
if (not(isinstance(model,(rf_classifier,rf_regressor)))):
raise TypeError("Tree is only available with Random Forest models")
if (not(isinstance(n,int)) or (n<0)):
raise TypeError("The parameter 'n' must be a positive integer")
if (model.model_type=="rf_classifier"):
leaf_info="probability"
elif (model.model_type=="rf_regressor"):
leaf_info="variance"
query=("select read_tree(using parameters model_name='{}', treeid='{}')")
query=query.format(model.model_name,n)
model.cursor.execute(query)
query_result=model.cursor.fetchone()[0]
query_result=query_result.replace('TreeID: ','')
query_result=query_result.replace(' #node: ','')
query_result=query_result.replace(' Tree Depth: ','')
query_result=query_result.replace(' Tree Breadth: ','')
query_result=query_result.replace(' rootID: ','')
query_result=query_result.replace('NodeID: ','')
query_result=query_result.replace(' Node Depth: ','')
query_result=query_result.replace(' isLeaf: ','')
query_result=query_result.replace(' threshold: ','')
query_result=query_result.replace(' split on: ','')
query_result=query_result.replace(' isSplitCategorical: ','')
query_result=query_result.replace(' leftCategory: ','')
query_result=query_result.replace(' leftChild ID: ','')
query_result=query_result.replace(' rightChild ID: ','')
query_result=query_result.replace(' prediction: ','')
query_result=query_result.replace(' probability: ','')
query_result=query_result.split('\n')
del query_result[-1]
information=query_result[0]
tree0=[item.split(',') for item in query_result]
del tree0[0]
for item in tree0:
if (len(item)==8):
item[4]=model.predictor_columns[int(item[4])]
information=information.split(",")
try:
screen_columns=shutil.get_terminal_size().columns
except:
screen_rows, screen_columns = os.popen('stty size', 'r').read().split()
print("-"*int(screen_columns))
print("Tree Id: "+information[0])
print("Number of Nodes: "+information[1])
print("Tree Depth: "+information[2])
print("Tree Breadth: "+information[3])
print("-"*int(screen_columns))
try:
from anytree import Node, RenderTree
if (tree0[0][2]=="1"):
tree_node_1=Node("[1] => {} (probability={})".format(tree0[0][3],tree0[0][4]))
else:
tree_node_1=Node("[1]")
leaf_nb=0
for idx,item in enumerate(tree0):
if (idx!=0):
for item1 in tree0:
if ((len(item1)==8) and (item[0] in [item1[6],item1[7]])):
parent=item1
if (parent[3]=="1"):
if (parent[6]==item[0]):
op="="
else:
op="!="
elif (parent[6]==item[0]):
op="<"
else:
op=">="
if (item[2]=="0"):
exec("tree_node_{}=Node('[{}] ({}{}{}){}',parent=tree_node_{})".format(item[0],item[0],parent[4],op,parent[5],"",parent[0]))
else:
exec("tree_node_{}=Node('[{}] ({}{}{}){}',parent=tree_node_{})".format(item[0],item[0],parent[4],op,parent[5],"",parent[0]))
exec("tree_leaf_{}=Node('{} (probability={})',parent=tree_node_{})".format(leaf_nb,item[3],item[4],item[0]))
leaf_nb+=1
for pre, fill, node in RenderTree(tree_node_1):
print("%s%s" % (pre,node.name))
from anytree.dotexport import RenderTreeGraph
try:
RenderTreeGraph(tree_node_1).to_picture("anytree/"+model.model_name+"_tree"+str(n)+".png")
if rvd.isnotebook():
from IPython.core.display import HTML,display
display(HTML("<img src='anytree/"+model.model_name+"_tree"+str(n)+".png'>"))
except:
print("/!\\ Warning: Please create a folder 'anytree' where you execute the code in order to export a png of the tree.")
except:
print("/!\\ Warning: Please install the anytree package to print the tree in the terminal.")
all_infos=['NodeID','Node Depth','isLeaf','isSplitCategorical','split on','threshold','leftChildID','rightChildID','prediction',leaf_info]
for idx,item in enumerate(tree0):
if (len(item)==8):
tree0[idx]+=['-','-']
else:
tree0[idx]=[item[0],item[1],item[2],'-','-','-','-','-',item[3],item[4]]
data_columns=[]
for i in range(0,10):
data_columns+=[[all_infos[i]]+[item[i] for item in tree0]]
return rvd.column_matrix(data_columns=data_columns,first_element="NodeID",title="Tree"+information[0])
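# A minimal usage sketch for tree(): the model/relation names are hypothetical and 'cursor'
# is assumed to be an open Vertica cursor; exporting the png additionally needs the optional
# anytree package and an existing 'anytree' folder.
def _example_tree(cursor):
    rfc=rf_classifier("titanic_rf","public.titanic","survived",["age","fare"],cursor,ntree=5)
    return tree(rfc,n=0)  # equivalent to rfc.tree(0)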
#
#############################
# __ ____ __ _ #
# \ \ / / \/ | | #
# \ \ / /| \ / | | #
# \ \/ / | |\/| | | #
# \ / | | | | |____ #
# \/ |_| |_|______| #
# #
#############################
#
#############################
# #
# Vertica Machine Learning #
# #
#############################
#
##
#####################
# #
# Cross Validation #
# #
#####################
class cross_validate:
#
# Initialization
#
def __init__(self,algorithm,input_relation,response_column,predictor_columns,cursor,
model_name=None,fold_count=5,hyperparams=None,prediction_cutoff=0.5,load=False):
drop_at_the_end=False
if not(load):
if (type(model_name)!=str):
drop_at_the_end=True
model_name="_vpython_cv_"+str(np.random.randint(10000))
query="select cross_validate('{}','{}','{}','{}' using parameters cv_model_name='{}',cv_metrics='accuracy,error_rate'"
query=query.format(algorithm,input_relation,response_column,",".join(predictor_columns),model_name)
query+=",cv_fold_count={}".format(fold_count)
if (type(hyperparams)==str):
query+=",cv_hyperparams='{}'".format(hyperparams)
if (algorithm=="logistic_reg"):
query+=",cv_prediction_cutoff={}".format(prediction_cutoff)
query+=")"
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.model_type="cross_validation"
print(self)
if (drop_at_the_end):
drop_model(model_name,cursor)
# Object Representation
def __repr__(self):
formatted_text="model_type='{}'\nmodel_name='{}'\nCounters:\n"+rvd.print_table(self.get_model_attribute("counters"))
formatted_text+="Fold Info:\n"+rvd.print_table(self.get_model_attribute("fold_info"))
formatted_text+="Details:\n"+rvd.print_table(self.get_model_attribute("run_details"))
formatted_text+="Averages:\n"+rvd.print_table(self.get_model_attribute("run_average"))[0:-2]
formatted_text=formatted_text.format(self.model_type,self.model_name)
if isnotebook():
return "<cross_validate>"
else:
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
def get_model_attribute(self,attr_name="run_details"):
if (attr_name in ["run_details","run_average","fold_info","counters","call_string"]):
query="select get_model_attribute(using parameters model_name='{}',attr_name='{}')".format(
self.model_name,attr_name)
self.cursor.execute(query)
columns=[column[0] for column in self.cursor.description]
all_columns=[]
query_result=self.cursor.fetchall()
for i in range(len(query_result[0])):
all_columns+=[[columns[i]]+[item[i] for item in query_result]]
return all_columns
else:
raise TypeError("'attr_name' must be in run_details|run_average|fold_info|counters|call_string")
###########
# #
# Kmeans #
# #
###########
class kmeans:
#
# Initialization
#
def __init__(self,model_name,input_relation,input_columns,num_clusters,cursor,max_iterations=10,
epsilon=1e-4,init_method="kmeanspp",initial_centers=None,load=False):
if not(load):
query="select kmeans('{}','{}','{}',{} using parameters max_iterations={},epsilon={}"
query=query.format(model_name,input_relation,",".join(input_columns),num_clusters,max_iterations,epsilon)
name="_vpython_kmeans_initial_centers_table_"+str(np.random.randint(1000000))
if (type(initial_centers)==list):
query0="drop table if exists "+name
cursor.execute(query0)
if (len(initial_centers)!=num_clusters):
print("/!\\ Warning: 'initial_centers' must be a list of 'num_clusters'={} points".format(num_clusters))
print("The 'initial_centers' will be choosed using the 'init_method'="+init_method)
else:
wrong_initial_centers=False
for item in initial_centers:
if (len(input_columns)!=len(item)):
wrong_initial_centers=True
break
if (wrong_initial_centers):
print("/!\\ Warning: Each points of 'initial_centers' must be of size len({})={}".format(
input_columns,len(input_columns)))
print("The 'initial_centers' will be choosed using the 'init_method' "+init_method)
else:
temp_initial_centers=[item for item in initial_centers]
for item in initial_centers:
del temp_initial_centers[0]
if (item in temp_initial_centers):
wrong_initial_centers=True
break
if (wrong_initial_centers):
print("/!\\ Warning: All the points of 'initial_centers' must be different")
print("The 'initial_centers' will be choosed using the 'init_method' "+init_method)
else:
query0=[]
for i in range(len(initial_centers)):
line=[]
for j in range(len(initial_centers[0])):
line+=[str(initial_centers[i][j])+" as "+input_columns[j]]
line=",".join(line)
query0+=["select "+line]
query0=" union ".join(query0)
query0="create table "+name+" as "+query0
cursor.execute(query0)
query+=",initial_centers_table='"+name+"'"
else:
query+=",init_method='"+init_method+"'"
query+=")"
cursor.execute(query)
query="drop table if exists "+name
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.input_relation=input_relation
self.input_columns=input_columns
self.num_clusters=num_clusters
self.model_type="kmeans"
self.category="clustering"
self.all_importances=None
# Object Representation
def __repr__(self):
query="select get_model_attribute(using parameters model_name='{}',attr_name='centers')".format(self.model_name)
self.cursor.execute(query)
query_result=self.cursor.fetchall()
columns=[]
for i in range(0,len(self.input_columns)):
columns+=[[self.input_columns[i]]+[item[i] for item in query_result]]
if (isnotebook()):
rvd.print_table(columns)[0:-2]
formatted_text=""
else:
formatted_text="Clusters:\n"+rvd.print_table(columns)[0:-2]
formatted_text="model_type='{}'\nmodel_name='{}'\ninput_relation='{}'\ninput_columns='{}'\n"+formatted_text
formatted_text=formatted_text.format(self.model_type,self.model_name,self.input_relation,",".join(self.input_columns))
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
# Add the Kmeans prediction to the rvd
def add_to_rvd(self,rvd,name="kmeans_cluster"+str(np.random.randint(10000))):
imputation="apply_kmeans("+",".join(self.input_columns)+" using parameters model_name='"+self.model_name+"'"+",match_by_pos='True')"
rvd.add_feature(name,imputation)
return name
# Return True if the model Converged
def converged(self):
query="select get_model_attribute(using parameters model_name='{}', attr_name='metrics')".format(self.model_name)
self.cursor.execute(query)
query_result=self.cursor.fetchone()[0]
if (query_result.split("Converged: ")[1].split("\n")[0]=="True"):
return True
return False
# Features Importance
def features_importance(self,show=True):
return features_importance(self,show=show)
# Sum of Squares
def between_cluster_SS(self):
query="select get_model_attribute(using parameters model_name='{}', attr_name='metrics')".format(self.model_name)
self.cursor.execute(query)
query_result=self.cursor.fetchone()[0]
return float(query_result.split("Between-Cluster Sum of Squares: ")[1].split("\n")[0])
def cluster_SS(self,show=True,display=True):
query="select get_model_attribute(using parameters model_name='{}', attr_name='metrics')".format(self.model_name)
self.cursor.execute(query)
query_result=self.cursor.fetchone()[0]
all_cluster_SS=[]
for i in range(0,self.num_clusters):
all_cluster_SS+=[float(query_result.split("Cluster "+str(i)+": ")[1].split("\n")[0])]
if (show):
all_cluster_SS_pr=['Cluster SS']+all_cluster_SS
if not(isnotebook()):
print(rvd.print_table([all_cluster_SS_pr])[0:-2])
else:
rvd.print_table([all_cluster_SS_pr])[0:-2]
if (display):
plt.figure(figsize=(7,5))
plt.rcParams['axes.facecolor']='#E5E5E5'
clusters=range(0,self.num_clusters)
all_cluster_SS_pr=all_cluster_SS
plt.barh(clusters,all_cluster_SS_pr,0.9,alpha=0.86,color="dodgerblue")
plt.ylabel("Clusters")
plt.xlabel("Sum of Squares")
plt.title("Clusters Sum of Squares")
plt.gca().xaxis.grid()
plt.gca().set_axisbelow(True)
plt.yticks(clusters)
plt.show()
return all_cluster_SS
def total_SS(self):
query="select get_model_attribute(using parameters model_name='{}', attr_name='metrics')".format(self.model_name)
self.cursor.execute(query)
query_result=self.cursor.fetchone()[0]
return float(query_result.split("Total Sum of Squares: ")[1].split("\n")[0])
def within_cluster_SS(self):
query="select get_model_attribute(using parameters model_name='{}', attr_name='metrics')".format(self.model_name)
self.cursor.execute(query)
query_result=self.cursor.fetchone()[0]
return float(query_result.split("Total Within-Cluster Sum of Squares: ")[1].split("\n")[0])
######################
# #
# Linear Regression #
# #
######################
class linear_reg:
#
# Initialization
#
def __init__(self,model_name,input_relation,response_column,predictor_columns,cursor,optimizer='Newton',
epsilon=1e-6,max_iterations=100,regularization="None",l=0.0,alpha=0.5,load=False):
if not(load):
query="select linear_reg('{}','{}','{}','{}' using parameters optimizer='{}',epsilon={},max_iterations={}"
query=query.format(model_name,input_relation,response_column,",".join(predictor_columns),
optimizer,epsilon,max_iterations)
query+=",regularization='{}',lambda={}".format(regularization,l)
if (regularization=='ENet'):
query+=",alpha={}".format(alpha)
query+=")"
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.input_relation=input_relation
self.response_column=response_column
self.predictor_columns=predictor_columns
self.model_type="linear_reg"
self.category="regression"
self.all_importances=None
self.mse_val=None
self.rsquared_val=None
# Object Representation
def __repr__(self):
object_repr=self.details().__repr__()
formatted_text=("model_type='{}'\nmodel_name='{}'\ninput_relation='{}'\nresponse_column='{}'\npredictor_columns='{}'\n"+
self.parameter_value(show=False)[4])
if not(isnotebook()):
formatted_text=formatted_text+"\nParameters:\n"+object_repr
formatted_text=formatted_text.format(self.model_type,self.model_name,self.input_relation,self.response_column,",".join(self.predictor_columns))
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
# Add the linear_reg prediction to the rvd
def add_to_rvd(self,rvd,name="linear_reg_pred"+str(np.random.randint(10000))):
imputation="predict_linear_reg("+",".join(self.predictor_columns)+" using parameters model_name='"+self.model_name+"'"+",match_by_pos='True')"
rvd.add_feature(name,imputation)
return name
# All the details of the model: p-value,t-value,coeffs...
def details(self):
return details(self.model_name,self.cursor)
# Features Importance
def features_importance(self,show=True,with_intercept=False):
return features_importance(self,show=show,with_intercept=with_intercept)
# MSE and RSQUARED
def metrics(self):
return reg_metrics(self)
# Return the mse of the model
def mse(self):
return mse(self)
# Return the value of the requested parameter: use '*' to see them all
def parameter_value(self,parameter_name="*",show=True):
if (parameter_name=="*"):
r=self.parameter_value("regularization")
l=self.parameter_value("lambda")
r_row=self.parameter_value("rejected_row_count")
a_row=self.parameter_value("accepted_row_count")
formatted_text="regularization: "+str(r)
formatted_text+="\nlambda: "+str(l)
formatted_text+="\nrejected_row_count: "+str(r_row)
formatted_text+="\naccepted_row_count: "+str(a_row)
if (show):
print(formatted_text)
return r,l,r_row,a_row,formatted_text
elif (parameter_name in ["regularization","l","lambda","rejected_row_count","accepted_row_count"]):
return parameter_value(self,parameter_name=parameter_name)
else:
print("Please use a correct parameter value: regularization|lambda|rejected_row_count|accepted_row_count|*")
return False
def plot(self,color=None,projection=None,max_nb_points=1000,show=True):
return plot_reg(self,color=color,projection=projection,max_nb_points=max_nb_points,show=show)
# Return the rsquared of the model
def rsquared(self):
return rsquared(self)
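# A minimal usage sketch for linear_reg: the relation/column names are hypothetical, 'cursor'
# is assumed to be an open Vertica cursor, and 'my_rvd' would be an existing rvd object built
# on the same relation.
def _example_linear_reg(cursor,my_rvd):
    lr=linear_reg("houses_lr","public.houses","price",["surface","rooms"],cursor)
    lr.details()                        # coefficients, t-values, p-values
    lr.add_to_rvd(my_rvd,name="price_pred")
    return lr.metrics()                 # mse and rsquared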
########################
# #
# Logistic Regression #
# #
########################
class logistic_reg:
#
# Initialization
#
def __init__(self,model_name,input_relation,response_column,predictor_columns,cursor,optimizer='Newton',epsilon=1e-6,
max_iterations=100,regularization='None',l=1,alpha=0.5,load=False):
if not(load):
query="select logistic_reg('{}','{}','{}','{}' using parameters optimizer='{}',epsilon={},max_iterations={}"
query=query.format(model_name,input_relation,response_column,",".join(predictor_columns),
optimizer,epsilon,max_iterations)
query+=",regularization='{}',lambda={}".format(regularization,l)
if (regularization=='ENet'):
query+=",alpha={}".format(alpha)
query+=")"
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.input_relation=input_relation
self.response_column=response_column
self.predictor_columns=predictor_columns
self.model_type="logistic_reg"
self.category="binomial"
self.all_importances=None
self.logloss_value=None
# Object Representation
def __repr__(self):
object_repr=self.details().__repr__()
formatted_text=("model_type='{}'\nmodel_name='{}'\ninput_relation='{}'\nresponse_column='{}'\npredictor_columns='{}'\n"+
self.parameter_value(show=False)[4])
if not(isnotebook()):
formatted_text=formatted_text+"\nParameters:\n"+object_repr
formatted_text=formatted_text.format(self.model_type,self.model_name,self.input_relation,self.response_column,",".join(self.predictor_columns))
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
# model accuracy
def accuracy(self,threshold=0.5):
return accuracy(self,threshold,1)
# Add the logistic_reg prediction to the rvd
def add_to_rvd(self,rvd,name="logistic_reg_pred"+str(np.random.randint(10000)),prediction_type='response',cutoff=0.5):
if (prediction_type in ['response','probability']):
imputation=("predict_logistic_reg("+",".join(self.predictor_columns)+" using parameters model_name='"
+self.model_name+"',"+"type='{}',cutoff={},match_by_pos='True')".format(prediction_type,cutoff))
rvd.add_feature(name,imputation)
return name
else:
raise TypeError("Please use a correct prediction_type: response|probability")
# model auc
def auc(self):
return auc(self,1)
# Confusion Matrix
def confusion_matrix(self,threshold=0.5):
return confusion_matrix(self,threshold=threshold)
# All the details of the model: p-value,t-value,coeffs...
def details(self):
return details(self.model_name,self.cursor)
# Error Rate
def error_rate(self,threshold=0.5):
return error_rate(self,threshold=threshold)
# Features Importance
def features_importance(self,show=True,with_intercept=False):
return features_importance(self,show=show,with_intercept=with_intercept)
# Lift Table
def lift_table(self,num_bins=100,color=["dodgerblue","#444444"],show=True,input_class=1):
return lift_table(self,num_bins=num_bins,color=color,show=show)
# Log Loss
def logloss(self):
if (self.logloss_value==None):
return logloss(self)
return self.logloss_value
# Return the value of the requested parameter: use '*' to see them all
def parameter_value(self,parameter_name="*",show=True):
if (parameter_name=="*"):
r=self.parameter_value("regularization")
l=self.parameter_value("lambda")
r_row=self.parameter_value("rejected_row_count")
a_row=self.parameter_value("accepted_row_count")
formatted_text="regularization: "+str(r)
formatted_text+="\nlambda: "+str(l)
formatted_text+="\nrejected_row_count: "+str(r_row)
formatted_text+="\naccepted_row_count: "+str(a_row)
if (show):
print(formatted_text)
return r,l,r_row,a_row,formatted_text
elif (parameter_name in ["regularization","l","lambda","rejected_row_count","accepted_row_count"]):
return parameter_value(self,parameter_name=parameter_name)
else:
raise TypeError("Please use a correct parameter value: regularization|lambda|rejected_row_count|accepted_row_count|*")
def plot(self,marker=["o","^"],color=None,projection=None,max_nb_points=None):
def logit(x):
return 1/(1+math.exp(-x))
coefficients=self.details().data_columns
a0=float(coefficients[1][1])
alpha=[0.9,0.7]
if (type(projection)==list):
if (len(projection)>len(self.predictor_columns)):
return self.plot(max_nb_points=max_nb_points)
else:
idx=coefficients[0].index(projection[0])
a1=coefficients[1][idx]
if (len(projection)>1):
idx=coefficients[0].index(projection[1])
a2=coefficients[1][idx]
else:
a1=float(coefficients[1][2])
if (len(coefficients[1]))>=4:
a2=float(coefficients[1][3])
projection=self.predictor_columns
if ((type(max_nb_points)!=int) or (max_nb_points<0)):
if (len(projection)==1):
max_nb_points=500
else:
max_nb_points=1000
if (len(projection)==1):
if ((type(color)!=list) or (len(color)!=3)):
color=["mediumseagreen","dodgerblue","black"]
columns=[]
for i in range(2):
query="select {},random() from {} where {} is not null and {}={} order by 2 limit {}".format(projection[0],
self.input_relation,projection[0],self.response_column,i,int(max_nb_points/2))
self.cursor.execute(query)
all_points=self.cursor.fetchall()
columns+=[[float(item[0]) for item in all_points]]
plt.figure(figsize=(7,5),facecolor='#EFEFEF')
all_scatter=[]
min_f=min(columns[0]+columns[1])
max_f=max(columns[0]+columns[1])
x=np.linspace(min_f,max_f,num=1000)
y=[logit(a0+a1*item) for item in x]
plt.plot(x,y,alpha=0.5,color=color[2])
for i in range(2):
all_scatter+=[plt.scatter(columns[i],[logit(a0+a1*item) for item in columns[i]],alpha=alpha[i],marker=marker[i],color=color[i])]
plt.gca().grid()
plt.gca().set_axisbelow(True)
plt.xlabel(projection[0])
plt.ylabel("logit")
plt.legend(all_scatter,[0,1],scatterpoints=1,loc="upper right",ncol=4,title=self.response_column,fontsize=8)
plt.title(self.model_type+': '+self.response_column+'=logit('+str(round(a0,3))+"+("+str(round(a1,3))+")*"+projection[0]+")")
plt.show()
elif (len(projection)==2):
if ((type(color)!=list) or (len(color)!=3)):
color=["mediumseagreen","dodgerblue","gray"]
columns=[]
for i in range(2):
query="select {},{},random() from {} where {} is not null and {} is not null and {}={} order by 3 limit {}".format(
projection[0],projection[1],self.input_relation,projection[0],projection[1],self.response_column,i,int(max_nb_points/2))
self.cursor.execute(query)
all_points=self.cursor.fetchall()
columns+=[[[float(item[0]) for item in all_points],[float(item[1]) for item in all_points]]]
min_f1=float(min(columns[0][0]+columns[1][0]))
max_f1=float(max(columns[0][0]+columns[1][0]))
min_f2=float(min(columns[0][1]+columns[1][1]))
max_f2=float(max(columns[0][1]+columns[1][1]))
X=np.arange(min_f1,max_f1,(max_f1-min_f1)/40.0)
Y=np.arange(min_f2,max_f2,(max_f2-min_f2)/40.0)
X,Y=np.meshgrid(X, Y)
Z=1/(1+np.exp(-(a0+a1*X+a2*Y)))
fig=plt.figure(figsize=(7,5),facecolor='white')
ax=fig.add_subplot(111,projection='3d')
ax.plot_surface(X,Y,Z, rstride=1, cstride=1, alpha=0.5,color=color[2])
all_scatter=[]
logit3D=[[],[]]
for i in range(2):
for j in range(len(columns[i][0])):
logit3D[i]+=[a0+columns[i][0][j]*a1+columns[i][1][j]*a2]
for i in range(2):
logit3D[i]=[logit(item) for item in logit3D[i]]
all_scatter+=[ax.scatter(columns[i][0],columns[i][1],logit3D[i],alpha=alpha[i],marker=marker[i],color=color[i])]
ax.set_xlabel(projection[0])
ax.set_ylabel(projection[1])
ax.set_zlabel("logit")
ax.legend(all_scatter,[0,1],scatterpoints=1,loc="lower left",ncol=4,title=self.response_column,fontsize=8,bbox_to_anchor=(0.9,1))
plt.title(self.model_type+': '+self.response_column+'=logit('+str(round(a0,3))+"+("+str(round(a1,3))+
")*"+projection[0]+"+("+str(round(a2,3))+")*"+projection[1]+")")
plt.show()
else:
print("/!\\ Warning: The dimension is too big.")
print("Please use the 'projection' parameter to see a projection of your figure")
def roc(self,num_bins=100,color=["dodgerblue","#444444"],show=True):
return roc(self,num_bins=num_bins,color=color,show=show)
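# A minimal usage sketch for logistic_reg: the relation/column names are hypothetical and
# 'cursor' is assumed to be an open Vertica cursor.
def _example_logistic_reg(cursor):
    logit=logistic_reg("churn_logit","public.churn","churned",["tenure","monthly_charges"],cursor)
    logit.features_importance(show=False)
    logit.confusion_matrix(threshold=0.5)
    return logit.roc(show=False)        # (auc/best_threshold table, roc points)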
################
# #
# Naive Bayes #
# #
################
class naive_bayes:
#
# Initialization
#
def __init__(self,model_name,input_relation,response_column,predictor_columns,cursor,alpha=1.0,load=False):
if not(load):
query="select naive_bayes('{}','{}','{}','{}' using parameters alpha={}"
query=query.format(model_name,input_relation,response_column,",".join(predictor_columns),
alpha)+")"
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.input_relation=input_relation
self.response_column=response_column
self.predictor_columns=predictor_columns
self.model_type="naive_bayes"
self.logloss_value=None
query="select {} from {} group by {}".format(response_column,input_relation,response_column)
cursor.execute(query)
query_result=cursor.fetchall()
classes=[item for sublist in query_result for item in sublist]
classes.sort()
if (len(classes)>2):
self.category="multinomial"
else:
self.category="binomial"
self.classes=classes
# Object Representation
def __repr__(self):
formatted_text=self.details().__repr__()
formatted_text=("model_type='{}'\nmodel_name='{}'\ninput_relation='{}'\nresponse_column='{}'\npredictor_columns='{}'\n"+
self.parameter_value(show=False)[3]+"\nProbabilities:\n"+formatted_text)
formatted_text=formatted_text.format(self.model_type,self.model_name,self.input_relation,self.response_column,",".join(self.predictor_columns))
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
# model accuracy
def accuracy(self,threshold=0.5,input_class=None):
if (len(self.classes)==2) and (input_class==None):
input_class=self.classes[1]
return accuracy(self,threshold,input_class)
# Add the naive_bayes prediction to the rvd
def add_to_rvd(self,rvd,name="naive_bayes_pred"+str(np.random.randint(10000)),prediction_type='response',input_class=None):
if (prediction_type in ['response','probability']):
imputation=("predict_naive_bayes("+",".join(self.predictor_columns)+" using parameters model_name='"
+self.model_name+"',"+"type='{}'".format(prediction_type))
if ((prediction_type=="probability") and ((input_class in self.classes) or (int(input_class) in self.classes))):
imputation+=",class='{}'".format(input_class)
imputation+=",match_by_pos='True')+0"
rvd.add_feature(name,imputation)
return name
else:
raise TypeError("Please use a correct prediction_type: response|probability")
# model auc
def auc(self,input_class=None):
if not((input_class in self.classes) or (str(input_class) in self.classes)):
input_class=self.classes[1]
return auc(self,input_class)
# Confusion Matrix
def confusion_matrix(self,threshold=0.5,input_class=None):
if (len(self.classes)==2) and (input_class==None):
input_class=self.classes[1]
return confusion_matrix(self,threshold=threshold,input_class=input_class)
# All the details of the model: probabilities
def details(self):
return details(self.model_name,self.cursor,attr_name="prior")
# Error Rate
def error_rate(self,threshold=0.5,input_class=None):
if (len(self.classes)==2) and (input_class==None):
input_class=self.classes[1]
return error_rate(self,threshold=threshold,input_class=input_class)
# Lift Table
def lift_table(self,num_bins=100,color=["dodgerblue","#444444"],show=True,input_class=None):
if (self.category=="binomial"):
input_class=self.classes[1]
elif (input_class==None):
input_class=self.classes[0]
return lift_table(self,num_bins=num_bins,color=color,show=show,input_class=input_class)
# Log Loss
def logloss(self):
if (self.logloss_value==None):
return logloss(self)
return self.logloss_value
# Return the value of the requested parameter: use '*' to see them all
def parameter_value(self,parameter_name="*",show=True):
if (parameter_name=="*"):
alpha=self.parameter_value("alpha")
r_row=self.parameter_value("rejected_row_count")
a_row=self.parameter_value("accepted_row_count")
formatted_text="alpha: "+str(alpha)
formatted_text+="\nrejected_row_count: "+str(r_row)
formatted_text+="\naccepted_row_count: "+str(a_row)
if (show):
print(formatted_text)
return alpha,r_row,a_row,formatted_text
elif (parameter_name in ["alpha","rejected_row_count","accepted_row_count"]):
return parameter_value(self,parameter_name=parameter_name)
else:
print("Please use a correct parameter value: alpha|rejected_row_count|accepted_row_count|*")
return False
# ROC
def roc(self,num_bins=100,color=["dodgerblue","#444444"],show=True,input_class=None):
if (self.category=="binomial"):
input_class=self.classes[1]
elif (input_class==None):
input_class=self.classes[0]
return roc(self,num_bins=num_bins,color=color,show=show,input_class=input_class)
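# A minimal usage sketch for naive_bayes on a hypothetical three-class relation: per-class
# curves are selected through 'input_class'; 'cursor' is assumed to be an open Vertica cursor.
def _example_naive_bayes(cursor):
    nb=naive_bayes("iris_nb","public.iris","species",["petal_length","petal_width"],cursor)
    nb.details()                        # prior probabilities
    return nb.roc(input_class="setosa",show=False)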
#############################
# #
# Random Forest Classifier #
# #
#############################
class rf_classifier:
#
# Initialization
#
def __init__(self,model_name,input_relation,response_column,predictor_columns,cursor,ntree=20,
mtry=None,sampling_size=0.632,max_depth=5,max_breadth=32,min_leaf_size=1,min_info_gain=0.0,
nbins=32,load=False):
if not(load):
if (mtry==None):
mtry=max(int(len(predictor_columns)/3),1)
query=("select rf_classifier('{}','{}','{}','{}' using parameters ntree={},mtry={},sampling_size={},"+
"max_depth={},max_breadth={},min_leaf_size={},min_info_gain={},nbins={}")
query=query.format(model_name,input_relation,response_column,",".join(predictor_columns),
ntree,mtry,sampling_size,max_depth,max_breadth,min_leaf_size,min_info_gain,nbins)+")"
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.input_relation=input_relation
self.response_column=response_column
self.predictor_columns=predictor_columns
self.model_type="rf_classifier"
self.logloss_value=None
query="select {} from {} group by {}".format(response_column,input_relation,response_column)
cursor.execute(query)
query_result=cursor.fetchall()
classes=[item for sublist in query_result for item in sublist]
classes.sort()
if (len(classes)>2):
self.category="multinomial"
else:
self.category="binomial"
self.classes=classes
# Object Representation
def __repr__(self):
object_repr=self.details().__repr__()
formatted_text=("model_type='{}'\nmodel_name='{}'\ninput_relation='{}'\nresponse_column='{}'\npredictor_columns='{}'\n"+
self.parameter_value(show=False)[3])
if not(isnotebook()):
formatted_text=formatted_text+"\n"+object_repr
formatted_text=formatted_text.format(self.model_type,self.model_name,self.input_relation,self.response_column,",".join(self.predictor_columns))
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
# model accuracy
def accuracy(self,threshold=0.5,input_class=None):
if (len(self.classes)==2) and (input_class==None):
input_class=self.classes[1]
return accuracy(self,threshold,input_class)
# Add the rf_classifier prediction to the rvd
def add_to_rvd(self,rvd,name="rf_classifier_pred"+str(np.random.randint(10000)),prediction_type='response',input_class=None):
if (prediction_type in ['response','probability']):
imputation=("predict_rf_classifier("+",".join(self.predictor_columns)+" using parameters model_name='"
+self.model_name+"',"+"type='{}'".format(prediction_type))
if ((prediction_type=="probability") and ((input_class in self.classes) or (int(input_class) in self.classes))):
imputation+=",class='{}'".format(input_class)
imputation+=",match_by_pos='True')+0"
rvd.add_feature(name,imputation)
return name
else:
raise TypeError("Please use a correct prediction_type: response|probability")
# model auc
def auc(self,input_class=None):
if not((input_class in self.classes) or (str(input_class) in self.classes)):
input_class=self.classes[1]
return auc(self,input_class)
# Confusion Matrix
def confusion_matrix(self,threshold=0.5,input_class=None):
if (len(self.classes)==2) and (input_class==None):
input_class=self.classes[1]
return confusion_matrix(self,threshold=threshold,input_class=input_class)
# All the details of the model: probabilities
def details(self):
return details(self.model_name,self.cursor,model_type="rf")
# Error Rate
def error_rate(self,threshold=0.5,input_class=None):
if (len(self.classes)==2) and (input_class==None):
input_class=self.classes[1]
return error_rate(self,threshold=threshold,input_class=input_class)
# Lift Table
def lift_table(self,num_bins=100,color=["dodgerblue","#444444"],show=True,input_class=None):
if (self.category=="binomial"):
input_class=self.classes[1]
elif (input_class==None):
input_class=self.classes[0]
return lift_table(self,num_bins=num_bins,color=color,show=show,input_class=input_class)
# Log Loss
def logloss(self):
if (self.logloss_value==None):
return logloss(self)
return self.logloss_value
# Return the value of the requested parameter: use '*' to see them all
def parameter_value(self,parameter_name="*",show=True):
if (parameter_name=="*"):
t_count=self.parameter_value("tree_count")
r_row=self.parameter_value("rejected_row_count")
a_row=self.parameter_value("accepted_row_count")
formatted_text="tree_count: "+str(t_count)
formatted_text+="\nrejected_row_count: "+str(r_row)
formatted_text+="\naccepted_row_count: "+str(a_row)
if (show):
print(formatted_text)
return t_count,r_row,a_row,formatted_text
elif (parameter_name in ["tree_count","rejected_row_count","accepted_row_count"]):
return parameter_value(self,parameter_name=parameter_name)
else:
raise TypeError("Please use a correct parameter value: tree_count|rejected_row_count|accepted_row_count|*")
# ROC
def roc(self,num_bins=100,color=["dodgerblue","#444444"],show=True,input_class=None):
if (self.category=="binomial"):
input_class=self.classes[1]
elif (input_class==None):
input_class=self.classes[0]
return roc(self,num_bins=num_bins,color=color,show=show,input_class=input_class)
# Print Tree Id n
def tree(self,n=0):
return tree(self,n)
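# A minimal usage sketch for rf_classifier: the relation/column names are hypothetical and
# 'cursor' is assumed to be an open Vertica cursor.
def _example_rf_classifier(cursor):
    rfc=rf_classifier("titanic_rf","public.titanic","survived",["age","fare","pclass"],cursor,
        ntree=50,max_depth=6)
    rfc.error_rate()
    return rfc.logloss()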
############################
# #
# Random Forest Regressor #
# #
############################
class rf_regressor:
#
# Initialization
#
def __init__(self,model_name,input_relation,response_column,predictor_columns,cursor,ntree=20,
mtry=None,sampling_size=0.632,max_depth=5,max_breadth=32,min_leaf_size=5,min_info_gain=0.0,
nbins=32,load=False):
if not(load):
if (mtry==None):
mtry=max(int(len(predictor_columns)/3),1)
query=("select rf_regressor('{}','{}','{}','{}' using parameters ntree={},mtry={},sampling_size={},"+
"max_depth={},max_breadth={},min_leaf_size={},min_info_gain={},nbins={}")
query=query.format(model_name,input_relation,response_column,",".join(predictor_columns),
ntree,mtry,sampling_size,max_depth,max_breadth,min_leaf_size,min_info_gain,nbins)+")"
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.input_relation=input_relation
self.response_column=response_column
self.predictor_columns=predictor_columns
self.model_type="rf_regressor"
self.category="regression"
self.mse_val=None
self.rsquared_val=None
# Object Representation
def __repr__(self):
object_repr=self.details().__repr__()
formatted_text=("model_type='{}'\nmodel_name='{}'\ninput_relation='{}'\nresponse_column='{}'\npredictor_columns='{}'\n"+
self.parameter_value(show=False)[3])
if not(isnotebook()):
formatted_text=formatted_text+"\n"+object_repr
formatted_text=formatted_text.format(self.model_type,self.model_name,self.input_relation,self.response_column,",".join(self.predictor_columns))
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
# Add the rf_regressor prediction to the rvd
def add_to_rvd(self,rvd,name="rf_regressor_pred"+str(np.random.randint(10000))):
imputation="predict_rf_regressor("+",".join(self.predictor_columns)+" using parameters model_name='"+self.model_name+"'"+",match_by_pos='True')"
rvd.add_feature(name,imputation)
return name
# All the details of the model: p-value,t-value,coeffs...
def details(self):
return details(self.model_name,self.cursor,model_type="rf")
# MSE and RSQUARED
def metrics(self):
return reg_metrics(self)
# Return the mse of the model
def mse(self):
return mse(self)
# Return the value of the requested parameter: use '*' to see them all
def parameter_value(self,parameter_name="*",show=True):
if (parameter_name=="*"):
t_count=self.parameter_value("tree_count")
r_row=self.parameter_value("rejected_row_count")
a_row=self.parameter_value("accepted_row_count")
formatted_text="tree_count: "+str(t_count)
formatted_text+="\nrejected_row_count: "+str(r_row)
formatted_text+="\naccepted_row_count: "+str(a_row)
if (show):
print(formatted_text)
return t_count,r_row,a_row,formatted_text
elif (parameter_name in ["tree_count","rejected_row_count","accepted_row_count"]):
return parameter_value(self,parameter_name=parameter_name)
else:
raise TypeError("Please use a correct parameter value: regularization|lambda|rejected_row_count|accepted_row_count|*")
# Return the rsquared of the model
def rsquared(self):
return rsquared(self)
# Print Tree Id n
def tree(self,n=0):
return tree(self,n)
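# A minimal usage sketch for rf_regressor: the relation/column names are hypothetical and
# 'cursor' is assumed to be an open Vertica cursor.
def _example_rf_regressor(cursor):
    rfr=rf_regressor("houses_rf","public.houses","price",["surface","rooms"],cursor,ntree=50)
    rfr.tree(n=0)                       # print the first tree
    return rfr.metrics()                # mse and rsquared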
###################
# #
# SVM Classifier #
# #
###################
class svm_classifier:
#
# Initialization
#
def __init__(self,model_name,input_relation,response_column,predictor_columns,cursor,C=1.0,epsilon=1e-3,
max_iterations=100,load=False):
if not(load):
query="select svm_classifier('{}','{}','{}','{}' using parameters C={},epsilon={},max_iterations={}"
query=query.format(model_name,input_relation,response_column,",".join(predictor_columns),
C,epsilon,max_iterations)+")"
cursor.execute(query)
self.cursor=cursor
self.model_name=model_name
self.input_relation=input_relation
self.response_column=response_column
self.predictor_columns=predictor_columns
self.model_type="svm_classifier"
self.category="binomial"
self.all_importances=None
self.logloss_value=None
# Object Representation
def __repr__(self):
object_repr=self.details().__repr__()
formatted_text=("model_type='{}'\nmodel_name='{}'\ninput_relation='{}'\nresponse_column='{}'\npredictor_columns='{}'\n"+
self.parameter_value(show=False)[3])
if not(isnotebook()):
formatted_text=formatted_text+"\nParameters:\n"+object_repr
formatted_text=formatted_text.format(self.model_type,self.model_name,self.input_relation,self.response_column,",".join(self.predictor_columns))
return formatted_text
#
###########
# #
# Methods #
# #
###########
#
# model accuracy
def accuracy(self):
return accuracy(self,0.5,1)
# Add the svm_classifier prediction to the rvd
def add_to_rvd(self,rvd,name="svm_classifier_pred"+str(np.random.randint(10000))):
imputation="predict_svm_classifier("+",".join(self.predictor_columns)+" using parameters model_name='"+self.model_name+"'"+",match_by_pos='True')"
rvd.add_feature(name,imputation)
return name
# model auc
def auc(self):
return auc(self,1)
# Confusion Matrix
def confusion_matrix(self):
return confusion_matrix(self)
# All the details of the model: p-value,t-value,coeffs...
def details(self):
return details(self.model_name,self.cursor)
# Error Rate
def error_rate(self):
return error_rate(self)
# Features Importance
def features_importance(self,show=True,with_intercept=False):
return features_importance(self,show=show,with_intercept=with_intercept)
# Lift Table
def lift_table(self,num_bins=100,color=["dodgerblue","#444444"],show=True):
return lift_table(self,num_bins=num_bins,color=color,show=show)
# Log Loss
def logloss(self):
if (self.logloss_value==None):
return logloss(self)
return self.logloss_value
# Return the value of the requested parameter: use '*' to see them all
def parameter_value(self,parameter_name="*",show=True):
if (parameter_name=="*"):
iteration=self.parameter_value("iteration_count")
r_row=self.parameter_value("rejected_row_count")
a_row=self.parameter_value("accepted_row_count")
formatted_text="iteration_count: "+str(iteration)
formatted_text+="\nrejected_row_count: "+str(r_row)
formatted_text+="\naccepted_row_count: "+str(a_row)
if (show):
print(formatted_text)
return iteration,r_row,a_row,formatted_text
elif (parameter_name in ["iteration_count","rejected_row_count","accepted_row_count"]):
return parameter_value(self,parameter_name=parameter_name)
else:
raise TypeError("Please use a correct parameter value: iteration_count|rejected_row_count|accepted_row_count|*")
# Plot the model
def plot(self,marker=["o","^"],color=None,projection=None,max_nb_points=None):
coefficients=self.details().data_columns
a0=float(coefficients[1][1])
alpha=[0.9,0.7]
if (type(projection)==list):
if (len(projection)>len(self.predictor_columns)):
return self.plot(max_nb_points=max_nb_points)
else:
idx=coefficients[0].index(projection[0])
a1=float(coefficients[1][idx])
if (len(projection)>1):
idx=coefficients[0].index(projection[1])
a2=float(coefficients[1][idx])
if (len(projection)>2):
idx=coefficients[0].index(projection[2])
a3=float(coefficients[1][idx])
else:
a1=float(coefficients[1][2])
if (len(coefficients[1]))>=4:
a2=float(coefficients[1][3])
if (len(coefficients[1]))>=5:
a3=float(coefficients[1][4])
projection=self.predictor_columns
if ((type(max_nb_points)!=int) or (max_nb_points<0)):
if (len(projection)==1):
max_nb_points=40
elif (len(projection)==2):
max_nb_points=400
else:
max_nb_points=1000
if (len(projection)==1):
if ((type(color)!=list) or (len(color)!=3)):
color=["mediumseagreen","dodgerblue","black"]
columns=[]
for i in range(2):
query="select {},random() from {} where {} is not null and {}={} order by 2 limit {}".format(projection[0],
self.input_relation,projection[0],self.response_column,i,int(max_nb_points/2))
self.cursor.execute(query)
all_points=self.cursor.fetchall()
columns+=[[float(item[0]) for item in all_points]]
border=-a0/a1
plt.figure(figsize=(7,5),facecolor='white')
all_scatter=[]
for i in range(2):
all_scatter+=[plt.scatter(columns[i],[0]*len(columns[i]),alpha=alpha[i],marker=marker[i],color=color[i])]
plt.plot([border,border],[-1,1],color=color[2])
plt.gca().grid()
plt.gca().set_axisbelow(True)
plt.xlabel(self.predictor_columns[0])
plt.gca().get_yaxis().set_ticks([])
plt.legend(all_scatter,[0,1],scatterpoints=1,loc="upper right",ncol=4,title="response",fontsize=8)
plt.title(self.model_type+': '+self.response_column+'=sign('+str(round(a0,3))+"+("+str(round(a1,3))+")*"+projection[0]+")")
plt.show()
elif (len(projection)==2):
if ((type(color)!=list) or (len(color)!=3)):
color=["mediumseagreen","dodgerblue","black"]
columns=[]
for i in range(2):
query="select {},{},random() from {} where {} is not null and {} is not null and {}={} order by 3 limit {}".format(
projection[0],projection[1],self.input_relation,projection[0],projection[1],self.response_column,i,int(max_nb_points/2))
self.cursor.execute(query)
all_points=self.cursor.fetchall()
columns+=[[[float(item[0]) for item in all_points],[float(item[1]) for item in all_points]]]
plt.figure(figsize=(7,5),facecolor='white')
all_scatter=[]
for i in range(2):
all_scatter+=[plt.scatter(columns[i][0],columns[i][1],alpha=alpha[i],marker=marker[i],color=color[i])]
min_f=min(columns[0][0]+columns[1][0])
max_f=max(columns[0][0]+columns[1][0])
plt.plot([min_f,max_f],[-(a0+a1*min_f)/a2,-(a0+a1*max_f)/a2],color=color[2])
plt.gca().grid()
plt.gca().set_axisbelow(True)
plt.xlabel(projection[0])
plt.ylabel(projection[1])
plt.legend(all_scatter,[0,1],scatterpoints=1,loc="upper right",ncol=4,title="response",fontsize=8)
plt.title(self.model_type+': '+self.response_column+'=sign('+str(round(a0,3))+"+("+str(round(a1,3))+
")*"+projection[0]+"+("+str(round(a2,3))+")*"+projection[1]+")")
plt.show()
elif (len(projection)==3):
if ((type(color)!=list) or (len(color)!=3)):
color=["mediumseagreen","dodgerblue","gray"]
columns=[]
for i in range(2):
query="select {},{},{},random() from {} where {} is not null and {} is not null and {} is not null and {}={} order by 4 limit {}".format(
projection[0],projection[1],projection[2],self.input_relation,projection[0],projection[1],projection[2],self.response_column,i,int(max_nb_points/2))
self.cursor.execute(query)
all_points=self.cursor.fetchall()
columns+=[[[float(item[0]) for item in all_points],[float(item[1]) for item in all_points],[float(item[2]) for item in all_points]]]
min_f1=float(min(columns[0][0]+columns[1][0]))
max_f1=float(max(columns[0][0]+columns[1][0]))
min_f2=float(min(columns[0][1]+columns[1][1]))
max_f2=float(max(columns[0][1]+columns[1][1]))
X=np.arange(min_f1,max_f1,(max_f1-min_f1)/20.0)
Y=np.arange(min_f2,max_f2,(max_f2-min_f2)/20.0)
import unittest
import warnings
import numpy as np
from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous
from girth import (rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml,
rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml,
grm_mml, rasch_conditional, standard_errors_bootstrap)
def _contains_keys(results, identifier):
"""Checks for standard keys in bootstrap result."""
for key in ['Standard Errors', '95th CI', 'Bias', 'Solution']:
if key not in results.keys():
raise AssertionError(f"Key: {key} not found in return argument."
f"Error in {identifier}")
for key in results['95th CI']:
if np.any(results['95th CI'][key][1] < results['95th CI'][key][0]):
raise AssertionError(f"Confidence Interval Error. {key} "
f"Error in {identifier}")
warnings.filterwarnings('ignore')
class TestBootstrapStandardErrors(unittest.TestCase):
"""Test Fixture for Bootstrap Standard Errors."""
# Smoke Tests to make sure they give an output
# Tests bootstrap errors
def setUp(self):
rng = np.random.default_rng(48725309847520)
self.discrimination = 0.25 + rng.rayleigh(.7, 5)
self.difficulty = np.linspace(-1.5, 1.5, 5)
self.difficulty_poly = np.sort(rng.standard_normal((5, 3)), axis=1)
self.theta = rng.standard_normal(1000)
self.options = {'max_iteration': 2}
self.boot_iter = 10
def test_jml_methods_dichotomous(self):
"""Testing Bootstrap on JML Methods Dichotomous."""
rng = np.random.default_rng(39485720394875)
dataset = create_synthetic_irt_dichotomous(self.difficulty, self.discrimination,
self.theta, seed=rng)
result = standard_errors_bootstrap(dataset, rasch_jml, n_processors=1,
bootstrap_iterations=self.boot_iter,
options=self.options)
self.assertEqual(result['Standard Errors']['Discrimination'][0], 0)
_contains_keys(result, 'Rasch JML')
result = standard_errors_bootstrap(dataset, onepl_jml, n_processors=2,
bootstrap_iterations=self.boot_iter,
options=self.options)
_contains_keys(result, '1PL JML')
result = standard_errors_bootstrap(dataset, twopl_jml, n_processors=2,
bootstrap_iterations=self.boot_iter,
options=self.options)
_contains_keys(result, '2PL JML')
@unittest.skip(reason="Github")
def test_jml_methods_polytomous(self):
"""Testing Bootstrap on JML Methods Polytomous."""
rng = np.random.default_rng(8672379287302651089)
dataset = create_synthetic_irt_polytomous(self.difficulty_poly, self.discrimination,
self.theta, seed=rng)
result = standard_errors_bootstrap(dataset, grm_jml, n_processors=2,
bootstrap_iterations=self.boot_iter,
options=self.options)
self.assertTupleEqual(result['95th CI']['Difficulty'][0].shape,
self.difficulty_poly.shape)
_contains_keys(result, 'GRM JML')
dataset = create_synthetic_irt_polytomous(self.difficulty_poly, self.discrimination,
self.theta, seed=rng, model='pcm')
result = standard_errors_bootstrap(dataset, pcm_jml, n_processors=2,
bootstrap_iterations=self.boot_iter,
options=self.options)
self.assertTupleEqual(result['95th CI']['Difficulty'][0].shape,
self.difficulty_poly.shape)
_contains_keys(result, 'PCM JML')
@unittest.skip(reason="Github")
def test_rasch_conditional(self):
"""Testing rasch conditional methods."""
rng = np.random.default_rng(426376867989075563)
"""
Deep Learning on Graphs - ALTEGRAD - Dec 2019
"""
import numpy as np
import networkx as nx
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score, log_loss
from utils import accuracy, normalize_adjacency
from models import GNN
# Hyperparameters
epochs = 100
n_hidden_1 = 8
n_hidden_2 = 16
learning_rate = 0.01
dropout_rate = 0.1
# Loads the karate network
G = nx.read_weighted_edgelist('karate.edgelist', delimiter=' ', nodetype=int, create_using=nx.Graph())
print(G.number_of_nodes())
print(G.number_of_edges())
n = G.number_of_nodes()
# Loads the class labels
class_labels = np.loadtxt('karate_labels.txt', delimiter=',', dtype=np.int32)
idx_to_class_label = dict()
for i in range(class_labels.shape[0]):
idx_to_class_label[class_labels[i,0]] = class_labels[i,1]
y = list()
for node in G.nodes():
y.append(idx_to_class_label[node])
y = np.array(y)
n_class = 2
adj = nx.to_numpy_matrix(G) # Obtains the adjacency matrix
adj = normalize_adjacency(adj) # Normalizes the adjacency matrix
############## Task 12
# Set the feature of all nodes to the same value
# Yields indices to split data into training and test sets
idx = np.random.RandomState(seed=42)
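# A minimal sketch of how this could continue (assumed; not part of the original lab code):
# the RandomState above would yield a permutation of the nodes for an 80/20 split, and the
# node features could all be set to the same constant value, e.g.
# features = np.ones((n, 1))
# perm = idx.permutation(n)
# idx_train, idx_test = perm[:int(0.8 * n)], perm[int(0.8 * n):]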
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import pytest
import numpy as np
import math
import random
from graspologic.match import GraphMatch as GMP
from graspologic.simulations import er_np
np.random.seed(0)
class TestGMP:
@classmethod
def setup_class(cls):
cls.barycenter = GMP(gmp=False)
cls.rand = GMP(n_init=100, init="rand", gmp=False)
cls.barygm = GMP(gmp=True)
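# Usage sketch outside the test fixture (attribute name assumed from graspologic's API):
# A = er_np(n=20, p=0.3)
# gmp = GMP(n_init=10)
# gmp.fit(A, A)            # match a graph against itself
# perm = gmp.perm_inds_    # recovered node permutation (assumed attribute name)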
def test_SGM_inputs(self):
with pytest.raises(TypeError):
GMP(n_init=-1.5)
with pytest.raises(ValueError):
GMP(init="random")
with pytest.raises(TypeError):
GMP(max_iter=-1.5)
with pytest.raises(TypeError):
GMP(shuffle_input="hey")
with pytest.raises(TypeError):
GMP(eps=-1)
with pytest.raises(TypeError):
GMP(gmp="hey")
with pytest.raises(TypeError):
GMP(padding=2)
with pytest.raises(ValueError):
GMP(padding="hey")
with pytest.raises(ValueError):
GMP().fit(
np.random.random((3, 4)),
np.random.random((3, 4)),
np.arange(2),
np.arange(2),
)
with pytest.raises(ValueError):
GMP().fit(
np.random.random((3, 4)),
np.random.random((3, 4)),
np.arange(2),
np.arange(2),
)
with pytest.raises(ValueError):
GMP().fit(np.identity(3), np.identity(3), np.identity(3), np.arange(2))
with pytest.raises(ValueError):
GMP().fit(np.identity(3), np.identity(3), np.arange(1), np.arange(2))
with pytest.raises(ValueError):
GMP().fit(np.identity(3), np.identity(3), np.arange(5), np.arange(5))
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 18 11:47:18 2016
@author: sebalander
"""
# %% IMPORTS
from matplotlib.pyplot import plot, imshow, legend, show, figure, gcf, imread
from matplotlib.pyplot import xlabel, ylabel
from cv2 import Rodrigues # , homogr2pose
from numpy import max, zeros, array, sqrt, roots, diag, sum, log
from numpy import sin, cos, cross, ones, concatenate, flipud, dot, isreal
from numpy import linspace, polyval, eye, linalg, mean, prod, vstack
from numpy import ones_like, zeros_like, pi, float64, transpose
from numpy import any as anny
from numpy.linalg import svd, inv, det
from scipy.linalg import norm
from scipy.special import chdtri
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
# from copy import deepcopy as dc
from importlib import reload
from calibration import StereographicCalibration as stereographic
from calibration import UnifiedCalibration as unified
from calibration import RationalCalibration as rational
from calibration import FisheyeCalibration as fisheye
from calibration import PolyCalibration as poly
reload(stereographic)
reload(unified)
reload(rational)
reload(fisheye)
reload(poly)
def f64(x):
return array(x, dtype=float64)
# %% calss that holds all data
class syntintr:
def __init__(self, k=None, uv=None, s=None, camera=None, model=None):
self.k = k
self.uv = uv
self.s = s
self.camera = camera
self.model = model
class syntches:
def __init__(self, nIm=None, nPt=None, rVecs=None, tVecs=None, objPt=None,
imgPt=None, imgNse=None):
self.nIm = nIm
self.nPt = nPt
self.rVecs = rVecs
self.tVecs = tVecs
self.objPt = objPt
self.imgPt = imgPt
self.imgNse = imgNse
class syntextr:
def __init__(self, ang=None, h=None, rVecs=None, tVecs=None, objPt=None,
imgPt=None, index10=None, imgNse=None):
self.ang = ang
self.h = h
self.rVecs = rVecs
self.tVecs = tVecs
self.objPt = objPt
self.imgPt = imgPt
self.index10 = index10
self.imgNse = imgNse
class synt:
def __init__(self, Intr=None, Ches=None, Extr=None):
self.Intr = Intr
self.Ches = Ches
self.Extr = Extr
class realches:
def __init__(self, nIm=None, nPt=None, objPt=None, imgPt=None,
imgFls=None):
self.nIm = nIm
self.nPt = nPt
self.objPt = objPt
self.imgPt = imgPt
self.imgFls = imgFls
class realbalk:
def __init__(self, objPt=None, imgPt=None, priorLLA=None, imgFl=None):
self.objPt = objPt
self.imgPt = imgPt
self.priorLLA = priorLLA
self.imgFl = imgFl
class realdete:
def __init__(self, carGPS=None, carIm=None):
self.carGPS = carGPS
self.carIm = carIm
class real:
def __init__(self, Ches=None, Balk=None, Dete=None):
self.Ches = Ches
self.Balk = Balk
self.Dete = Dete
class datafull:
'''
Nested namedtuples that hold the data for the paper
Data
Synt
Intr # listo: SyntIntr
camera 'vcaWide' string camera model
model string indicating camera intrinsic model
['poly', 'rational', 'fisheye', 'stereographic']
s is the image size
k sintehtic stereographic parameter
uv = s / 2 is the stereographic optical center
Ches # listo: SyntChes
nIm number of images
nPt number of point in image
objPt chessboard model grid
rVecs synth rotation vectors
tVecs synth tVecs
imgPt synth corners projected from objPt with synth params
imgNse noise of 1 sigma for the image
Extr # listo: SyntExtr
ang angles of synth pose tables
h heights of synth pose tables
rVecs rotation vectors associated to angles
tVecs tVecs associated to angles and h
objPt distributed 3D points on the floor
imgPt projected to image
imgNse noise for image detection, sigma 1
index10 indexes to select 10 points well distributed
Real
Ches # listo: RealChes
nIm number of chess images
nPt number of chess points per image
objPt chessboard model, 3D
imgPt detected corners in chessboard images
imgFls list of paths to the chessboard images
Balk
objPt calibration world points, lat lon
imgPt image points for calibration
priLLA prior lat-lon-altitude
imgFl camera snapshot file
Dete
carGps car gps coordinates
carIm car image detection traces
'''
def __init__(self, Synt=None, Real=None):
self.Synt = Synt
self.Real = Real
# %% Z=0 PROJECTION
def euler(al, be, ga):
'''
Returns the rotation matrix for the given Euler angles.
Craig, p. 42
The rotations are applied in this order:
ga: about X
be: about Y
al: about Z
'''
ca, cb, cg = cos([al, be, ga])
sa, sb, sg = sin([al, be, ga])
rot = array([[ca*cb, ca*sb*sg-sa*cg, ca*sb*cg+sa*sg],
[sa*cb, sa*sb*sg+ca*cg, sa*sb*cg-ca*sg],
[-sb, cb*sg, cb*cg]])
return rot
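# Quick sanity-check sketch: a proper rotation matrix must be orthonormal with det = +1.
# R = euler(0.1, -0.2, 0.3)
# assert norm(R.T.dot(R) - eye(3)) < 1e-9 and abs(det(R) - 1) < 1e-9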
def unit2CovTransf(C):
'''
returns the matrix that transforms points from unit normal pdf to a normal
pdf of covariance C. so that
T = cl.unit2CovTransf(C) # calculate transform matrix
X = np.random.randn(N, M, 2) # gen random points unitary normal
X = (X.reshape((N, M, 1, 2)) * # transform
T.reshape((1, M, 2, 2))
).sum(-1)
'''
u, s, v = svd(C)
if C.ndim == 2:
return u.dot(diag(sqrt(s))).dot(v.T)
elif C.ndim == 3:
n = s.shape[0]
d = s.shape[1]
s = sqrt(s)
v = v.transpose((0, 2, 1))
T = (u.reshape((-1, d, d, 1)) *
s.reshape((n, 1, d, 1)) *
v.reshape((n, 1, d, d)))
return T.sum(2)
else:
print('unexpected number of dimensions in covariance array')
return -1
# %% BASIC ROTOTRASLATION
def rotateRodrigues(x, r):
'''
rotates given vector x or list x of vectors as per rodrigues vector r
https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
'''
r.shape = 3
th = norm(r)
rn = r / th
ct = cos(th)
st = sin(th)
try:
# if x is just one point
x.shape = 3
return x * ct + cross(rn, x) * st + rn * dot(x, rn) * (1 - ct)
except:
# if x has many points
x.shape = (-1, 3)
aux1 = x * ct + cross(rn, x) * st
aux2 = rn.reshape((-1, 1)) * dot(x, rn) * (1 - ct)
return aux1 + aux2.T
def rotoTrasRodri(x, r, t):
'''
rototraslates all x points using r and t
'''
t.shape = 3
return rotateRodrigues(x, r) + t
def rotoTrasRodriInverse(x, r, t):
'''
rototraslates all x points using r and t inversely
'''
t.shape = 3
return rotateRodrigues(x - t, -r)
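# Round-trip sketch: rotoTrasRodriInverse undoes rotoTrasRodri for a given pose (r, t assumed valid):
# x2 = rotoTrasRodri(x.copy(), r, t)
# x0 = rotoTrasRodriInverse(x2, r, t)   # x0 should match the original x up to rounding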
def rotoTrasHomog(x, r, t):
'''
rototraslates points x and projects to homogenous coordinates
'''
x2 = rotoTrasRodri(x, r, t)
xh, yh = x2[:, :2].T / x2[:, 2]
'''
import matplotlib.pyplot as plt
plt.figure()
plt.plot(xh, yh,'.')
plt.plot()
'''
return xh, yh
# %% PARAMETER HANDLING
def formatParameters(rVec, tVec, cameraMatrix, distCoeffs, model):
switcher = {
'stereographic': stereographic.formatParameters,
'unified': unified.formatParameters,
'rational': rational.formatParameters,
'poly': poly.formatParameters,
'fisheye': fisheye.formatParameters
}
return switcher[model](rVec, tVec, cameraMatrix, distCoeffs)
def retrieveParameters(params, model):
switcher = {
'stereographic': stereographic.retrieveParameters,
'unified': unified.retrieveParameters,
'rational': rational.retrieveParameters,
'poly': poly.retrieveParameters,
'fisheye': fisheye.retrieveParameters
}
return switcher[model](params)
# %% DIRECT PROJECTION
def hom2ccd(xd, yd, cameraMatrix):
xccd = cameraMatrix[0, 0] * xd + cameraMatrix[0, 2]
yccd = cameraMatrix[1, 1] * yd + cameraMatrix[1, 2]
return vstack((xccd, yccd)).T
# switcher for radial distortion
distort = {
'stereographic': stereographic.radialDistort,
'unified': unified.radialDistort,
'rational': rational.radialDistort,
'poly': poly.radialDistort,
'fisheye': fisheye.radialDistort
}
def direct(objectPoints, rVec, tVec, cameraMatrix, distCoeffs, model,
ocv=False):
'''
performs projection form 3D world into image, is the "direct" distortion
optionally it uses opencv's function if available
'''
xh, yh = rotoTrasHomog(objectPoints, rVec, tVec)
rh = norm([xh, yh], axis=0)
q = distort[model](rh, distCoeffs, quot=True)
# print(xHomog.shape, q.shape)
xd = q * xh
yd = q * yh
# project to ccd
return hom2ccd(xd, yd, cameraMatrix)
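# Usage sketch (hypothetical pose and intrinsics; model must be one of the keys of `distort`):
# uv = direct(objectPoints, rVec, tVec, cameraMatrix, distCoeffs, 'stereographic')
# uv is an (N, 2) array of pixel coordinates on the CCD.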
def residualDirect(params, objectPoints, imagePoints, model):
switcher = {
'stereographic': stereographic.residualDirect,
'unified': unified.residualDirect,
'rational': rational.residualDirect,
'poly': poly.residualDirect,
'fisheye': fisheye.residualDirect
}
return switcher[model](params, objectPoints, imagePoints)
def calibrateDirect(objectPoints, imagePoints, rVec, tVec, cameraMatrix,
distCoeffs, model):
switcher = {
'stereographic': stereographic.calibrateDirect,
'unified': unified.calibrateDirect,
'rational': rational.calibrateDirect,
'poly': poly.calibrateDirect,
'fisheye': fisheye.calibrateDirect
}
return switcher[model](objectPoints, imagePoints, rVec, tVec,
cameraMatrix, distCoeffs)
# %% INVERSE PROJECTION
def ccd2disJacobian(xi, yi, cameraMatrix):
'''
returns jacobian to propagate uncertainties in ccd2homogemous mapping
Jd_i: jacobian wrt image coordiantes
Jd_f: jacobian wrt linear CCD parameters
'''
xi, yi, cameraMatrix = [f64(xi), f64(yi), f64(cameraMatrix)]
Jd_i = diag(1 / cameraMatrix[[0, 1], [0, 1]]) # doesn't depend on position
unos = ones_like(xi, dtype=float64)
ceros = zeros_like(unos, dtype=float64)
a = - unos / cameraMatrix[0, 0]
b = (cameraMatrix[0, 2] - xi) * a**2
c = - unos / cameraMatrix[1, 1]
d = (cameraMatrix[1, 2] - yi) * c**2
Jd_f = array([[b, ceros, a, ceros], [ceros, d, ceros, c]])
Jd_f = Jd_f.transpose((2, 0, 1)) # first index iterates points
return Jd_i, Jd_f
def ccd2dis(xi, yi, cameraMatrix, Ci=False, Cf=False):
'''
maps from CCd, image to homogenous distorted coordiantes.
must provide covariances for every point if cov is not None
Cf: is the covariance matrix of intrinsic linear parameters fx, fy, u, v
(in that order).
'''
xi, yi, cameraMatrix = [f64(xi), f64(yi), f64(cameraMatrix)]
# undo CCD projection, asume diagonal ccd rescale
xd = (xi - cameraMatrix[0, 2]) / cameraMatrix[0, 0]
yd = (yi - cameraMatrix[1, 2]) / cameraMatrix[1, 1]
Cibool = anny(Ci)
Cfbool = anny(Cf)
if Cibool or Cfbool:
Cd = zeros((xd.shape[0], 2, 2), dtype=float64) # create cov matrix
Jd_i, Jd_f = ccd2disJacobian(xi, yi, cameraMatrix)
if Cibool:
Jd_iResh = Jd_i.reshape((-1, 2, 2, 1, 1))
Cd += (Jd_iResh *
Ci.reshape((-1, 1, 2, 2, 1)) *
Jd_iResh.transpose((0, 4, 3, 2, 1))
).sum((2, 3))
if Cfbool:
# propagate uncertainty via Jacobians
Jd_fResh = Jd_f.reshape((-1, 2, 4, 1, 1))
Cd += (Jd_fResh *
Cf.reshape((-1, 1, 4, 4, 1)) *
Jd_fResh.transpose((0, 4, 3, 2, 1))
).sum((2, 3))
else:
Cd = False # return without covariance matrix
Jd_f = False
return xd, yd, Cd, Jd_f
# switcher for radial un-distortion
undistort = {
'stereographic': stereographic.radialUndistort,
'unified': unified.radialUndistort,
'rational': rational.radialUndistort,
'poly': poly.radialUndistort,
'fisheye': fisheye.radialUndistort
}
def dis2hom_ratioJacobians(xd, yd, distCoeffs, model):
'''
returns the distortion ratio and the jacobians with respect to undistorted
coords and distortion params.
'''
xd, yd, distCoeffs = [f64(xd), f64(yd), f64(distCoeffs)]
# calculate ratio of undistortion
rd = norm([xd, yd], axis=0)
q, ret, dQdH, dQdK = undistort[model](rd, distCoeffs, quot=True,
der=True)
xh = xd / q
yh = yd / q
rh = rd / q
# jacobian of D (distort) with respect to homogeneous coordinates
xyh = xh * yh
Jd_h = array([[xh**2, xyh], [xyh, yh**2]]) / rh
Jd_h *= dQdH.reshape(1, 1, -1)
Jd_h[[0, 1], [0, 1], :] += q
# jacobian of D (distort) with respect to the optical distortion parameters
Jd_k = array([xh * dQdK, yh * dQdK])
"""Calculated accumulated property with respect to other property."""
from typing import Optional
import numpy as np
from phonopy.phonon.dos import NormalDistribution
from phono3py.other.tetrahedron_method import get_integration_weights
from phono3py.phonon.grid import BZGrid
epsilon = 1.0e-8
class KappaDOS:
"""Class to calculate thermal conductivity spectram."""
def __init__(
self,
mode_kappa,
frequencies,
bz_grid: BZGrid,
ir_grid_points,
ir_grid_map=None,
frequency_points=None,
num_sampling_points=100,
):
"""Init method.
mode_kappa : ndarray
Target value.
shape=(temperatures, ir_grid_points, num_band, num_elem),
dtype='double'
frequencies : ndarray
shape=(ir_grid_points, num_band), dtype='double'
bz_grid : BZGrid
ir_grid_points : ndarray
Ir-grid point indices in BZ-grid.
shape=(ir_grid_points, ), dtype='int_'
ir_grid_map : ndarray, optional, default=None
Mapping table to ir-grid point indices in GR-grid.
None gives `np.arange(len(frequencies), 'int_')`.
frequency_points : array_like, optional, default=None
This is used as the frequency points. When None,
frequency points are created from `num_sampling_points`.
num_sampling_points : int, optional, default=100
Number of uniform sampling points.
"""
min_freq = min(frequencies.ravel())
max_freq = max(frequencies.ravel()) + epsilon
if frequency_points is None:
self._frequency_points = np.linspace(
min_freq, max_freq, num_sampling_points, dtype="double"
)
else:
self._frequency_points = np.array(frequency_points, dtype="double")
n_temp, _, _, n_elem = mode_kappa.shape
self._kdos = np.zeros(
(n_temp, len(self._frequency_points), 2, n_elem), dtype="double"
)
if ir_grid_map is None:
bzgp2irgp_map = None
else:
bzgp2irgp_map = self._get_bzgp2irgp_map(bz_grid, ir_grid_map)
for j, function in enumerate(("J", "I")):
iweights = get_integration_weights(
self._frequency_points,
frequencies,
bz_grid,
grid_points=ir_grid_points,
bzgp2irgp_map=bzgp2irgp_map,
function=function,
)
for i, iw in enumerate(iweights):
self._kdos[:, :, j] += np.transpose(
np.dot(iw, mode_kappa[:, i]), axes=(1, 0, 2)
)
self._kdos /= np.prod(bz_grid.D_diag)
import os
from .common import Benchmark
import numpy as np
class Records(Benchmark):
def setup(self):
self.l50 = np.arange(1000)
self.fields_number = 10000
self.arrays = [self.l50 for _ in range(self.fields_number)]
self.formats = [self.l50.dtype.str for _ in range(self.fields_number)]
self.formats_str = ','.join(self.formats)
self.dtype_ = np.dtype(
[
('field_{}'.format(i), self.l50.dtype.str)
for i in range(self.fields_number)
]
)
self.buffer = self.l50.tostring() * self.fields_number
def time_fromarrays_w_dtype(self):
np.core.records.fromarrays(self.arrays, dtype=self.dtype_)
def time_fromarrays_wo_dtype(self):
np.core.records.fromarrays(self.arrays)
"""
Name: Coordinates.py
Description: Marie-Annes Python code for converting between Horizon and Sky coordinate systems.
"""
from __future__ import division
import numpy as np
from astropy.time import TimeDelta
from numpy.random import random_sample as randomu
jd2000 = 2451545.0
def _premat(equinox1, equinox2, FK4=True):
"""
Return precession matrix needed to go from equinox1 to equinox2
equinox1 - original equinox of coordinates
equinox2 - equinox to precess to
returns 3x3 precession matrix
Shameless stolen from astrolib premat.pro
"""
d2r = np.pi/180.
s2r = d2r/3600. # convert seconds to radians
T = 1e-3*(equinox2 - equinox1)
if FK4:
ST = 1e-3*(equinox1 - 2000.)
# Compute 3 rotation angles
A = s2r * T * \
(23062.181 + ST * (139.656 + 0.0139*ST) \
+ T * (30.188 - 0.344 * ST + 17.998 * T))
B = s2r * T * T * (79.280 + 0.410*ST + 0.205*T) + A
C = s2r * T * (20043.109 - ST*(85.33 + 0.217*ST) \
+ T*(-42.665 - 0.217*ST - 41.833*T))
else:
A = 0.
B = 0.
C = 0.
sina = np.sin(A)
sinb = np.sin(B)
sinc = np.sin(C)
cosa = np.cos(A)
cosb = np.cos(B)
cosc = np.cos(C)
R = np.zeros((3, 3))
R[:,0] = np.array([ cosa*cosb*cosc - sina*sinb,
sina*cosb+cosa*sinb*cosc,
cosa*sinc]).flatten()
R[:,1] = np.array([-cosa*sinb - sina*cosb*cosc,
cosa*cosb - sina*sinb*cosc,
-sina*sinc]).flatten()
R[:,2] = np.array([-cosb*sinc,
-sinb*sinc,
cosc]).flatten()
return R
def _precess(_ra, _dec, equinox1, equinox2):
"""
Precess coordinate system from equinox1 to equinox2.
ra and dec are inputs in radians.
Shameless stolen from astrolib routine precess.pro
(Based on procedure from Computational Spherical Astronomy by Taff (1983).
p. 24.)
"""
ra = _ra*np.pi/180.
dec= _dec*np.pi/180.
a = np.cos(dec)
vec1 = np.array([a*np.cos(ra),
a*np.sin(ra),
np.sin(dec)]).T # cartesian vector on sphere
R0 = _premat(equinox1, equinox2) # get rotation matrix
print(vec1.shape)
vec1 = np.reshape(vec1, (vec1.shape[0], 1, vec1.shape[1]))
R0 = np.reshape(R0, (1, R0.shape[0], R0.shape[1]))
vec2 = np.sum(R0*vec1, axis=-1)
#vec2 = R0.dot(vec1)
ra_out = np.arctan2(vec2[:,1], vec2[:,0])
dec_out= np.arcsin(vec2[:,2])
#if ra_out < 0:
# ra_out += 2.*np.pi
#w = np.where((ra_out < 0))[0]
#if len(w) > 0:
# ra_out[w] += 2.*np.pi
return np.mod(ra_out*180./np.pi, 360), dec_out*180./np.pi
def _jd2gst(jd):
"""
Convert julian dates into Greenwich Sidereal Time.
From Practical Astronomy With Your Calculator.
"""
jd0 = np.floor(jd - 0.5) + 0.5
T = (jd0 - 2451545.0) / 36525
T0 = 6.697374558 + 2400.051336 * T + 0.000025862 * T**2
T0 %= 24
ut = (jd - jd0) * 24
T0 += ut * 1.002737909
T0 %= 24
return T0
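# Example sketch: the J2000.0 epoch (JD 2451545.0) gives a Greenwich sidereal time of
# roughly 18.7 hours:
# _jd2gst(np.array([2451545.0]))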
def _gst2lst(gst, geolon):
"""
Convert Greenwich Sidereal Time into Local Sidereal Time.
"""
# geolon: Geographic longitude EAST in degrees.
return (gst + geolon / 15.) % 24
def _jd2lst(lng, jd):
c = [280.46061837, 360.98564736629, 0.000387933, 38710000.0 ]
t0 = jd - jd2000
t = t0/36525.
theta = c[0] + (c[1] * t0) + t**2*(c[2] - t/ c[3] )
lst = ( theta + lng)/15.0
neg = np.where(lst < 0.0)[0]
n = len(neg)
if n > 0:
lst[neg] = 24. + np.mod(lst[neg] , 24)
lst = np.mod(lst , 24.)
return lst
def _pang(el, dec, geolat):
"""
Generate parallactic angle from elevation and declination
"""
d2r = np.pi/180.0
r2d = 180.0/np.pi
top = np.sin(geolat*d2r) - np.sin(el*d2r)*np.sin(dec*d2r)
bot = np.cos(el*d2r)*np.cos(dec*d2r)
p = np.arccos(top/bot)
if isinstance(p, np.ndarray):
p[np.isnan(p)] = 0
p[p > np.pi/2.] -= np.pi
else:
if np.isnan(p):
p = 0
else:
if p > np.pi/2.:
p -= np.pi
return p*r2d
def _equ2hor(_ra, _dec, _jd, geolat, geolon, precess=False):
"""
Convert from ra/dec to az/el (by <NAME>).
All inputs as degrees
"""
# Imports
from numpy import arccos, arcsin, cos, pi, sin, where
ra = np.array([_ra]).flatten()
dec= np.array([_dec]).flatten()
jd = np.array([_jd]).flatten()
lst = _gst2lst(_jd2gst(jd), geolon)
if precess:
J_now = np.mean( (jd - 2451545.)/365.25 + 2000.0 )
ra, dec = _precess(ra, dec, 2000., J_now)
#for i in range(len(J_now)):
# ra[i], dec[i] = _precess(ra[i], dec[i], 2000., J_now[i])
az, el = _equ2hor_lst(ra, dec, lst, geolat)
# Later
return az, el
def _equ2hor_lst(_ra, _dec, _lst, geolat):
from numpy import arccos, arcsin, cos, pi, sin, where
ra = np.array([_ra]).flatten()
dec= np.array([_dec]).flatten()
lst = np.array([_lst]).flatten()
d2r = pi/180.0
r2d = 180.0/pi
sin_dec = sin(dec*d2r)
cos_dec = cos(dec*d2r)
phi_rad = geolat*d2r
sin_phi = sin(phi_rad)
cos_phi = cos(phi_rad)
ha = 15.0*_ra2ha(ra, lst)
sin_ha = sin(ha*d2r)
cos_ha = cos(ha*d2r)
x = - cos_ha * cos_dec * sin_phi + sin_dec * cos_phi
y = - sin_ha * cos_dec
z = cos_ha * cos_dec * cos_phi + sin_dec * sin_phi
r = np.sqrt(x**2 + y**2)
az = np.arctan2(y, x)*180./np.pi
el = np.arctan2(z, r)*180./np.pi
w = (az < 0)
az[w] = az[w] + 360.
return az, el
def _hor2equ(_az, _el, _jd, geolat, geolon, precess=False):
"""
Convert from az/el to ra/dec (by <NAME>).
All inputs in degrees
"""
# Imports
from numpy import arccos, arcsin, cos, pi, sin, where
az = np.array([_az]).flatten()
el = np.array([_el]).flatten()
jd = np.array([_jd]).flatten()
#lst = _gst2lst(_jd2gst(jd), geolon)
lst = _jd2lst(geolon, jd)
ra, dec = _hor2equ_lst(az, el, lst, geolat)
if precess:
J_now = np.mean( (jd - 2451545.)/365.25 + 2000.0 )
ra, dec = _precess(ra, dec, 2000., J_now)
# Later
return ra, dec
def _hor2equ_lst(_az, _el, _lst, geolat):
az = np.array([_az]).flatten()
el = np.array([_el]).flatten()
lst = np.array([_lst]).flatten()
from numpy import arccos, arcsin, cos, pi, sin, where
d2r = pi/180.0
r2d = 180.0/pi
az_r = az*np.pi/180.
el_r = el*np.pi/180.
geolat_r = geolat*np.pi/180.
# Convert to equatorial coordinates
cos_el = cos(el_r)
sin_el = sin(el_r)
cos_phi = cos(geolat_r)
sin_phi = sin(geolat_r)
cos_az = cos(az_r)
sin_az = sin(az_r)
sin_dec = sin_el*sin_phi + cos_el*cos_phi*cos_az
dec = arcsin(sin_dec)
ha = [-sin_az*cos_el, -cos_az*sin_phi*cos_el + sin_el*cos_phi]
ha = np.arctan2(ha[0], ha[1])
w = np.where(ha < 0)[0]
if len(w) != 0:
ha[w] = ha[w] + np.pi*2.
ha = np.mod(ha, np.pi*2.)
ra = lst*15.0*np.pi/180.-ha
ra = where(ra >= 2.*np.pi, ra - 2.*np.pi, ra)
ra = where(ra < 0.0, ra + 2.*np.pi, ra)
ra *= 180./np.pi
dec *= 180./np.pi
return ra, dec
def _ra2ha(ra, lst):
"""
Converts a right ascension to an hour angle.
"""
return (lst - ra / 15.0) % 24
def _equ2gal(_ra, _dec):
"""
Converts right ascension and declination to Galactic lon and lat.
Uses rotation matrix Rg from 'Spherical Astronomy by <NAME>, Chapter 14, page 355'
"""
ra = _ra*np.pi/180.
dec= _dec*np.pi/180.
equVec = np.array([[np.cos(ra)*np.cos(dec)],
[np.sin(ra)*np.cos(dec)],
[np.sin(dec)]])
tg = 0.
ag = (17. + 45.6/60.)*15.*np.pi/180.
dg = -28.94*np.pi/180.
Rg = np.array([[-0.054876, -0.873437, -0.483835],
[ 0.494109, -0.444830, 0.746982],
[-0.867666, -0.198076, 0.455984]])
Rg = np.reshape(Rg, (3, 3, 1))
Rg = np.transpose(Rg, [1,0,2])
test = Rg*equVec
galVec = np.sum(Rg*equVec, axis=0)#Rg.dot(equVec)
lon = np.arctan2(galVec[1],galVec[0])
lat = np.pi/2. - np.arctan2(np.sqrt(galVec[0]**2 + galVec[1]**2), galVec[2])
lon = lon *180./np.pi
lat = lat*180./np.pi
return lon, lat
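# Sanity-check sketch: the galactic centre direction used above (RA ~ 266.4 deg,
# Dec ~ -28.94 deg) should map to roughly (l, b) = (0, 0):
# lon, lat = _equ2gal(np.array([266.4]), np.array([-28.94]))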
def _gal2equ(gl, gb, year=2000):
"""
Shamelessly copied from the IDL glactc routine.
"""
rapol = (12. + 49./60.)*np.pi/180.*15.
decpol= (27.4)*np.pi/180.
dlon = (123.0)*np.pi/180.
sdp = np.sin(decpol)
cdp = np.sqrt(1.0 - sdp*sdp)
sgb = np.sin(gb)
cgb = np.sqrt(1. - sgb**2)
sdec = sgb*sdp + cgb*cdp*np.cos(dlon - gl)
dec = np.arcsin(sdec)
cdec = np.sqrt(1.-sdec**2)
sinf = cgb * np.sin(dlon-gl)/cdec
cosf = (sgb-sdp*sdec)/(cdp*cdec)
ra = rapol + np.arctan2(sinf, cosf)
ra *= 180./np.pi
dec *= 180./np.pi
if year != 2000:
ra, dec = _precess(ra, dec, 2000., year)
ra[ra > 360] -= 360
return np.mod(ra, 360), dec
def _nutate(jd):
dtor = np.pi/180.
T = (jd[:] - 2451545.0)/36525.0
# Mean elongation of the Moon
coeff1 = np.array([297.85036, 445267.111480, -0.0019142, 1./189474.])
d = np.mod(np.polyval(coeff1[::-1], T)*dtor, 2*np.pi)
d = np.reshape(d, (d.size, 1))
# Sun's mean anomaly
coeff2 = np.array([357.5277, 35999.050340, -0.0001603, -1./3e5 ])
m = np.mod(np.polyval(coeff2[::-1], T)*dtor, 2.*np.pi)
m = np.reshape(m, (m.size, 1))
# Moon's mean anomaly
coeff3 = np.array([134.96298, 477198.867398, 0.0086972, 1.0/5.625e4 ])
mprime = np.mod(np.polyval(coeff3[::-1], T)*dtor, 2.*np.pi)
mprime = np.reshape(mprime, (mprime.size, 1))
# Moon's argument of latitude
coeff4 = np.array([93.27191, 483202.017538, -0.0036825, -1.0/3.27270e5 ])
f = np.mod(np.polyval(coeff4[::-1], T)*dtor, 2.*np.pi)
f = np.reshape(f, (f.size, 1))
# Longitude of the ascending node of the Moon's mean orbit on the ecliptic,
# measured from the mean equinox of the date
coeff5 = np.array([125.04452, -1934.136261, 0.0020708, 1./4.5e5])
omega = np.mod(np.polyval(coeff5[::-1], T)*dtor, 2.*np.pi)
omega = np.reshape(omega, (omega.size, 1))
d_lng = np.array([0,-2,0,0,0,0,-2,0,0,-2,-2,-2,0,2,0,2,0,0,-2,0,2,0,0,-2,0,-2,0,0,2,
-2,0,-2,0,0,2,2,0,-2,0,2,2,-2,-2,2,2,0,-2,-2,0,-2,-2,0,-1,-2,1,0,0,-1,0,0,
2,0,2])
d_lng = np.reshape(d_lng, (d_lng.size, 1))
m_lng = np.concatenate(( np.array([0,0,0,0,1,0,1,0,0,-1]),np.zeros(17),np.array([2,0,2,1,0,-1,0,0,0,1,1,-1,0,
0,0,0,0,0,-1,-1,0,0,0,1,0,0,1,0,0,0,-1,1,-1,-1,0,-1]) ))
m_lng = np.reshape(m_lng, (m_lng.size, 1))
mp_lng = np.array([0,0,0,0,0,1,0,0,1,0,1,0,-1,0,1,-1,-1,1,2,-2,0,2,2,1,0,0,-1,0,-1,
0,0,1,0,2,-1,1,0,1,0,0,1,2,1,-2,0,1,0,0,2,2,0,1,1,0,0,1,-2,1,1,1,-1,3,0])
mp_lng = np.reshape(mp_lng, (mp_lng.size, 1))
from __future__ import print_function, division
import matplotlib
#matplotlib.use('Agg')
import numpy as np
import scipy as sp
from operator import truediv
import math, time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from itertools import groupby
import sisl as si
from numbers import Integral
# I don't know why, but the lines below were
# fucking up my routine "makeTB_FrameOutside", on the "contruct" command
#try:
# from itertools import izip as zip
#except:
# pass
def dagger(M):
return np.conjugate(np.transpose(M))
def displaySparse(m, filename, dpi=300):
if not isinstance(m, sp.sparse.coo_matrix):
m = sp.sparse.coo_matrix(m)
fig = plt.figure()
ax = fig.add_subplot(111, axisbg='black')
ax.plot(m.col, m.row, 's', color='white', ms=10)
ax.set_xlim(0, m.shape[1])
ax.set_ylim(0, m.shape[0])
ax.set_aspect('equal')
for spine in ax.spines.values():
spine.set_visible(False)
ax.invert_yaxis()
ax.set_aspect('equal')
ax.set_xticks([])
ax.set_yticks([])
plt.savefig(filename, facecolor='black', edgecolor='black', dpi=dpi)
return ax
def get_potential(TSHS, iio, atoms):
"""
iio: index (0-based) of orbital in basis set (i.e., pz in SZP: iio = 2)
"""
orbs = TSHS.a2o(atoms)+iio
on = TSHS.Hk(dtype=np.float64, format='array')[orbs, orbs]
return on
def check_Dirac(ts, mp, displacement=[0,0,0]):
mp = si.MonkhorstPack(ts, mp, displacement=displacement)
print('Check that Dirac is in here: ')
print(mp.k)
print('Check that this is in *.KP file : {}'.format(mp.tocartesian([0., 1./3, 0]) * si.unit.siesta.unit_convert('Bohr', 'Ang')))
i_dirac = (np.logical_and(mp.k[:,1] == 1./3, mp.k[:,0] == 0.)).nonzero()[0]
if len(i_dirac) != 1:
print('Dirac point is not in the grid')
exit(1)
else:
print('Dirac point is at kindex: {}'.format(i_dirac[0]))
def get_Dirac(hs, mp, displacement=[0,0,0]):
#check_Dirac(hs.geom, mp, displacement)
ens_dirac = hs.eigh(k=[0., 1./3, 0])
i_dirac = hs.na * 2 - 1
return np.average(ens_dirac[i_dirac:i_dirac+2])
def plot_PotDiff(TSHS, TSHS_0, ia, axis, iio, o_dev, o_inner): # include option for frame!
on, yy, atoms = get_potential_profile(TSHS, ia, axis, iio)
on0 = get_potential_profile(TSHS_0, ia, axis, iio)[0]
on0 = np.array([np.mean(on0)]*len(on))
# Check
print('y (Ang)\t\tPot (eV)\tPot0 (eV)\tPot-Pot0 (eV)')
a_dev = TSHS.o2a(o_dev, unique=True)
a_inner = TSHS.o2a(o_inner, unique=True)
for iia, y, o, o0 in zip(atoms, yy, on, on0):
if iia in a_inner:
print('{:7.4f}\t\t{:7.4f}\t\t{:7.4f}\t\t{:7.4f}\t\t(inner)'.format(y,o,o0,o-o0))
else:
print('{:7.4f}\t\t{:7.4f}\t\t{:7.4f}\t\t{:7.4f}'.format(y,o,o0,o-o0))
# Subtract pristine potential
PotDiff = on-on0
# Write to file
with open('PotDiff.dat', 'w') as pf:
for yc, pd in zip(yy, PotDiff):
pf.write('{}\t\t{}\n'.format(yc, pd))
# Plot
plt.figure()
plt.plot(yy, PotDiff, 'b')
md, Md = np.amin(TSHS.xyz[a_dev, axis]), np.amax(TSHS.xyz[a_dev, axis])
plt.axvline(md, color='k', linestyle='dashed', linewidth=2)
plt.axvline(Md, color='k', linestyle='dashed', linewidth=2)
tmp_dev = TSHS.geom.sub(a_dev); tmp_inner = tmp_dev.sub(a_inner)
mi, Mi = np.amin(tmp_inner.xyz[a_inner, axis]), np.amax(tmp_inner.xyz[a_inner, axis])
plt.axvspan(mi, Mi, alpha=0.3, facecolor='blue', edgecolor='none')
plt.ylabel(r'$H_{p_z}-H^0_{p_z}\, (e{\rm V})$', fontsize=20)
plt.xlabel(r'$y\, (\AA)$', fontsize=20)
plt.xlim(0, TSHS.cell[axis, axis])
#plt.xlim(TSHS.center(what='cell')[1], TSHS.cell[1,1])
plt.legend(loc=0); plt.savefig('PotDiff.pdf', bbox_inches='tight')
def get_potential_profile(TSHS, ia, axis, iio):
"""
ia: atom crossed by the line
axis: direction of the line
iio: index (0-based) of orbital in basis set (i.e., pz in SZP: iio = 2)
"""
# Find atoms in line passing by center of
xyz0, xyz = TSHS.xyz[ia, axis%1], TSHS.xyz[:, axis%1]
atoms = np.where(np.logical_and(xyz0-1.43 < xyz, xyz < xyz0+1.43))[0]
v = TSHS.geom.copy(); v.atom[atoms] = si.Atom(8, R=[1.43]); v.write('checkPot.xyz')
orbs = TSHS.a2o(atoms)+iio
on = TSHS.Hk(dtype=np.float64, format='array')[orbs, orbs]
ylist = TSHS.xyz[atoms, axis]
idxs = np.argsort(ylist)
on, ylist = on[idxs], ylist[idxs]
return on, ylist, atoms
def xyz2polar(tbt, origin=0):
na = tbt.na
# radii from origin
if isinstance(origin, Integral):
origin = tbt.xyz[origin]
_, r = tbt.geom.close_sc(origin, R=np.inf, ret_rij=True)
# angles from origin
transl = tbt.geom.translate(-origin)
y = transl.xyz[:,1]
i_ypos = np.where(y >= 0)[0]
i_yneg = np.setdiff1d(np.arange(na), i_ypos)
t = np.zeros(na)
t[i_ypos] = transl.angle(i_ypos, dir=(1., 0, 0), rad=True)
t[i_yneg] = transl.angle(i_yneg, dir=(-1., 0, 0), rad=True) +np.pi
return r, t
def radial_T_from_bc(tbt, elec, E=None, kavg=True,
origin=0, thetamin=0., thetamax=2*np.pi, ntheta=360,
Rmin=5., Rmax=999999999, dr=40.,
input=None, save='radial_T_from_bc.txt', saveinput='rt.txt'):
if E:
Eidx = tbt.Eindex(E)
en = tbt.E[Eidx]
else:
en = tbt.E[0]
print('Using E = {} eV'.format(en))
na = tbt.na
if isinstance(origin, Integral):
origin = tbt.xyz[origin]
# (x, y) ----> (r, t)
if input:
r, t = np.loadtxt(input, delimiter='\t', usecols=(1, 2), unpack=True, skiprows=1)
else:
r, t = xyz2polar(tbt, origin=origin)
f = open(saveinput, 'w')
f.write('ia\tr (Angstrom)\tangle (radians; center {})\n'.format(origin))
for ia, rr, tt in zip(np.arange(na), r, t):
f.write('{}\t{}\t{}\n'.format(ia, rr, tt))
f.close()
print('(x,y) ---> (r,t): DONE')
# theta bins
thetas = np.linspace(thetamin, thetamax, ntheta, endpoint=False)
dtheta = thetas[1]-thetas[0]
print(len(thetas), dtheta, thetas)
# Digitize t into thetas
inds = np.digitize(t, thetas) -1 # First bin is associated to 0.0 rad
print('Digitize theta: DONE')
# radii[i] is the radius of the interface between 2 crowns centered at the position of the tip
newRmax = np.amin(np.absolute(np.array([origin[0], origin[1],
(origin-tbt.cell[0]-tbt.cell[1])[0], (origin-tbt.cell[0]-tbt.cell[1])[1]])))
radii = np.arange(np.amax([Rmin, dr]), np.amin([Rmax, newRmax])+2*dr, dr)
nradii = len(radii)
print(nradii, dr, radii)
# indices of atom within the various shells
# atoms in list ishell[i] belong to [radii[i], radii[i+1]]
ishell = tbt.geom.close_sc(origin, R=radii, idx=tbt.a_dev)
print('Close: DONE')
# Read bond-current
bc = tbt.bond_current(0, en, kavg=kavg, only='all', uc=True)
print('bc: DONE')
Tavg = np.zeros(ntheta*nradii)
thetas_toplot = Tavg.copy()
radii_toplot = Tavg.copy()
j=0
for id in np.arange(ntheta): # Loop over unique angles
print(' Doing theta #{} of {} ({} rad)'.format(id+1, ntheta, thetas[id]))
idx_intheta = np.where(inds == id)[0] # find indices of atoms whose t is in sector theta
for id_r in np.arange(1,nradii-1): # Loop over unique radii
print(' Doing radius #{} of {} ({} Ang)'.format(id_r, nradii, radii[id_r]))
idx_1_indr = ishell[id_r] # Indices of atoms within internal shell
mask = np.in1d(idx_1_indr, idx_intheta)
idx_1 = idx_1_indr[mask] # Indices of atoms in internal shell AND sector theta
idx_2 = ishell[id_r+1] # # Indices of atoms within external shell
Tavg[j] = bc[idx_1.reshape(-1, 1), idx_2.reshape(1, -1)].sum()
thetas_toplot[j] = thetas[id]
radii_toplot[j] = radii[id_r]
#print(' ({} Ang, {} rad) --> {}'.format(radii_toplot[j], thetas_toplot[j], Tavg[j]))
j+=1
# Write
f = open(save, 'w')
f.write('center {}\n'.format(origin))
f.write('radius (Ang), \t theta (rad), \tT from radial bond current\n')
for rr, theta, ttt in zip(radii_toplot, thetas_toplot, Tavg):
f.write('{}\t{}\t{}\n'.format(rr, theta, ttt))
f.close()
return radii_toplot, thetas_toplot, Tavg
def atom_current_radial(tbt, elec, E, kavg=True, activity=True,
origin=0, thetamin=0., thetamax=2*np.pi, ntheta=360,
Rmin=5., Rmax=999999999, dr=40.,
input=None, save='atom_current_radial.txt', saveinput='ac_input.txt'):
if E:
Eidx = tbt.Eindex(E)
en = tbt.E[Eidx]
else:
en = tbt.E[0]
print('Using E = {} eV'.format(en))
na = tbt.na
if isinstance(origin, Integral):
origin = tbt.xyz[origin]
# (x, y) ----> (r, t)
if input:
r, t, ac = np.loadtxt(input, delimiter='\t', usecols=(1, 2, 3), unpack=True, skiprows=1)
else:
r, t = xyz2polar(tbt, origin=origin)
print('start extraction of atom_current...')
ac = tbt.atom_current(elec, E, kavg, activity)
print('...end extraction of atom_current')
f = open(saveinput, 'w')
f.write('ia\tr (Ang)\tangle (rad; center {})\tatom current\n'.format(origin))
for ia, rr, tt, a in zip(np.arange(na), r, t, ac):
f.write('{}\t{}\t{}\t{}\n'.format(ia, rr, tt, a))
f.close()
print('(x,y) ---> (r,t): DONE')
# theta bins
thetas = np.linspace(thetamin, thetamax, ntheta, endpoint=False)
dtheta = thetas[1]-thetas[0]
print('Thetas entries:')
print(len(thetas), dtheta, thetas)
# Digitize t into thetas
inds = np.digitize(t, thetas) -1 # First bin is associated to 0.0 rad
print('Digitize theta: DONE')
# radii[i] is the radius of the interface between 2 crowns centered at the position of the tip
newRmax = np.amin(np.absolute(np.array([origin[0], origin[1],
(origin-tbt.cell[0]-tbt.cell[1])[0], (origin-tbt.cell[0]-tbt.cell[1])[1]])))
radii = np.arange(np.amax([Rmin, dr]), np.amin([Rmax, newRmax])+dr, dr)
nradii = len(radii)
print('Radii entries:')
print(nradii, dr, radii)
# indices of atom within the various shells
# atoms in list ishell[i] belong to [radii[i], radii[i+1]]
#ishell = tbt.geom.close_sc(origin, R=radii, idx=tbt.a_dev)
#print('Close: DONE')
current_r = np.zeros((nradii, ntheta))
for ir, rr in enumerate(radii): # Loop over unique radii
current_t = np.zeros(ntheta)
counts_t = current_t.copy()
inR = np.where(r < rr)[0]
for id, a in zip(inds[inR], ac[inR]):
current_t[id] += a
counts_t[id] += 1
current_r[ir, :] = np.divide(current_t, counts_t)
# Write
np.savetxt(save, np.transpose(np.vstack([thetas, current_r])), delimiter='\t',
newline='\n', comments='', header=', '.join(str(e) for e in radii))
return radii, thetas, current_r
def plot_LDOS(geom, LDOS, figname='figure.png',
vmin=None, vmax=None):
import matplotlib.collections as collections
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
x, y = geom.xyz[:,0], geom.xyz[:,1]
fig, ax = plt.subplots()
ax.set_aspect('equal')
vmin, vmax = vmin, vmax
if vmin is None:
vmin = np.min(LDOS)
if vmax is None:
vmax = np.max(LDOS)
colors = LDOS
area = 15
image = ax.scatter(x, y, c=colors, s=area, marker='o', edgecolors='None', cmap='viridis')
image.set_clim(vmin, vmax)
image.set_array(LDOS)
ax.autoscale()
ax.margins(0.1)
plt.xlabel('$x (\AA)$')
plt.ylabel('$y (\AA)$')
plt.gcf()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
axcb = plt.colorbar(image, cax=cax, format='%1.2f', ticks=[vmin, vmax])
plt.savefig(figname, bbox_inches='tight', transparent=True, dpi=300)
print('Successfully plotted to "{}"'.format(figname))
def CAP(geometry, side, dz_CAP=30, write_xyz=True, zaxis=2):
# Determine orientation
if zaxis == 2:
xaxis, yaxis = 0, 1
elif zaxis == 0:
xaxis, yaxis = 1, 2
elif zaxis == 1:
xaxis, yaxis = 0, 2
# Natural units (see "http://superstringtheory.com/unitsa.html")
hbar = 1
m = 0.511e6 # eV
c = 2.62
print('\nSetting up CAP regions: {}'.format(side))
print('Width of absorbing walls = {} Angstrom'.format(dz_CAP))
Wmax = 100
dH_CAP = si.Hamiltonian(geometry, dtype='complex128')
CAP_list = []
### EDGES
if 'right' in side:
print('Setting at right')
z, y = geometry.xyz[:, xaxis], geometry.xyz[:, yaxis]
z2 = np.max(geometry.xyz[:, xaxis]) + 1.
z1 = z2 - dz_CAP
idx = np.where(np.logical_and(z1 <= z, z < z2))[0]
fz = (4/(c**2)) * ((dz_CAP/(z2-2*z1+z[idx]))**2 + (dz_CAP/(z2-z[idx]))**2 - 2 )
Wz = ((hbar**2)/(2*m)) * (2*np.pi/(dz_CAP/2000))**2 * fz
orbs = dH_CAP.geom.a2o(idx) # if you have just 1 orb per atom, then orb = ia
for orb,wz in zip(orbs, Wz):
dH_CAP[orb, orb] = complex(0, -wz)
CAP_list.append(idx)
#print(list2range_TBTblock(idx))
if 'left' in side:
print('Setting at left')
z, y = geometry.xyz[:, xaxis], geometry.xyz[:, yaxis]
z2 = np.min(geometry.xyz[:, xaxis]) - 1.
z1 = z2 + dz_CAP
idx = np.where(np.logical_and(z2 < z, z <= z1))[0]
fz = (4/(c**2)) * ((dz_CAP/(z2-2*z1+z[idx]))**2 + (dz_CAP/(z2-z[idx]))**2 - 2 )
Wz = ((hbar**2)/(2*m)) * (2*np.pi/(dz_CAP/2000))**2 * fz
orbs = dH_CAP.geom.a2o(idx) # if you have just 1 orb per atom, then orb = ia
for orb,wz in zip(orbs, Wz):
dH_CAP[orb, orb] = complex(0, -wz)
CAP_list.append(idx)
#print(list2range_TBTblock(idx))
if 'top' in side:
print('Setting at top')
z, y = geometry.xyz[:, xaxis], geometry.xyz[:, yaxis]
y2 = np.max(geometry.xyz[:, yaxis]) + 1.
y1 = y2 - dz_CAP
idx = np.where(np.logical_and(y1 <= y, y < y2))[0]
fz = (4/(c**2)) * ( (dz_CAP/(y2-2*y1+y[idx]))**2 + (dz_CAP/(y2-y[idx]))**2 - 2 )
Wz = ((hbar**2)/(2*m)) * (2*np.pi/(dz_CAP/2000))**2 * fz
orbs = dH_CAP.geom.a2o(idx) # if you have just 1 orb per atom, then orb = ia
for orb,wz in zip(orbs, Wz):
dH_CAP[orb, orb] = complex(0, -wz)
CAP_list.append(idx)
#print(list2range_TBTblock(idx))
if 'bottom' in side:
print('Setting at bottom')
z, y = geometry.xyz[:, xaxis], geometry.xyz[:, yaxis]
y2 = np.min(geometry.xyz[:, yaxis]) - 1.
y1 = y2 + dz_CAP
idx = np.where(np.logical_and(y2 < y, y <= y1))[0]
fz = (4/(c**2)) * ( (dz_CAP/(y2-2*y1+y[idx]))**2 + (dz_CAP/(y2-y[idx]))**2 - 2 )
Wz = ((hbar**2)/(2*m)) * (2*np.pi/(dz_CAP/2000))**2 * fz
orbs = dH_CAP.geom.a2o(idx) # if you have just 1 orb per atom, then orb = ia
for orb,wz in zip(orbs, Wz):
dH_CAP[orb, orb] = complex(0, -wz)
CAP_list.append(idx)
#print(list2range_TBTblock(idx))
CAP_list = np.concatenate(CAP_list).ravel().tolist()
if write_xyz:
# visualize CAP regions
visualize = geometry.copy()
visualize.atom[CAP_list] = si.Atom(8, R=[1.44])
visualize.write('CAP.xyz')
return dH_CAP
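# Usage sketch (hypothetical geometry object; sides are matched by substring, so
# 'left right' selects both walls):
# dH_CAP = CAP(device.geom, 'left right', dz_CAP=50, write_xyz=True)
# dH_CAP holds the purely imaginary absorbing on-site potentials to be added to the
# device Hamiltonian before transport post-processing.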
def read_fullTSHS(HSfilename, geomFDFfilename):
""" Read Hamiltonian and Geometry objects
and update Atoms properties of 'TSHS' from 'FDF' """
if isinstance(HSfilename, str):
HSfile = si.get_sile(HSfilename).read_hamiltonian()
else:
HSfile = HSfilename.copy()
if isinstance(geomFDFfilename, str):
geomFDF = si.get_sile(geomFDFfilename).read_geometry(True)
else:
geomFDF = geomFDFfilename.copy()
# Update species
for ia, (a, afdf) in enumerate(zip(HSfile.atom, geomFDF.atom)):
A = si.Atom(afdf.Z, a.orbital, afdf.mass, afdf.tag)
HSfile.atom[ia] = A
HSfile.reduce()
return HSfile
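# Usage sketch (hypothetical file names):
# TSHS = read_fullTSHS('siesta.TSHS', 'RUN.fdf')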
def T_from_bc(tbt, elec, idx_1, idx_2, E=None, kavg=True, write_xyz=None):
if write_xyz: # visualize regions
visualize = tbt.geom.copy()
visualize.atom[idx_1] = si.Atom(8, R=[1.44])
visualize.atom[idx_2] = si.Atom(9, R=[1.44])
visualize.write('{}.xyz'.format(write_xyz))
if E:
Eidx = tbt.Eindex(E)
energies = np.array([tbt.E[Eidx]])
else:
energies = tbt.E
T = np.zeros(len(energies))
for ie,e in enumerate(energies):
print('Doing E # {} of {} ({} eV)'.format(ie+1, len(energies), e))
bc = tbt.bond_current(elec, e, kavg=kavg, only='all', uc=True)
T[ie] += bc[idx_1.reshape(-1, 1), idx_2.reshape(1, -1)].sum()
return T
def T_from_bc_from_orbital(tbt, elec, o_idx, idx_1, idx_2, E=None,
kavg=True, write_xyz=None):
if write_xyz: # visualize regions
visualize = tbt.geom.copy()
visualize.atom[idx_1] = si.Atom(8, R=[1.44])
visualize.atom[idx_2] = si.Atom(9, R=[1.44])
visualize.write('{}.xyz'.format(write_xyz))
if E:
Eidx = tbt.Eindex(E)
energies = np.array([tbt.E[Eidx]])
else:
energies = tbt.E
T = np.zeros(len(energies))
for ie,e in enumerate(energies):
print('Doing E # {} of {} ({} eV)'.format(ie+1, len(energies), e))
Jij = tbt.orbital_current(elec, e, kavg=kavg)
orbs_1 = tbt.geom.a2o(idx_1) + o_idx
orbs_2 = tbt.geom.a2o(idx_2) + o_idx
T[ie] = Jij[orbs_1.reshape(-1, 1), orbs_2.reshape(1, -1)].sum()
#bc = tbt.bond_current(elec, e, kavg=kavg, only='all', uc=True)
return T
def list2range_TBTblock(lst):
""" Convert a list of elements into a string of ranges
Examples
--------
>>> list2range([2, 4, 5, 6])
2, 4-6
>>> list2range([2, 4, 5, 6, 8, 9])
2, 4-6, 8-9
"""
lst = [el+1 for el in lst]
lst.sort()
# Create positions
pos = [j - i for i, j in enumerate(lst)]
t = 0
rng = ''
for _, els in groupby(pos):
ln = len(list(els))
el = lst[t]
if t > 0:
rng += '\n'
t += ln
if ln == 1:
rng += ' atom ['+str(el)+']'
else:
rng += ' atom [{} -- {}]'.format(el, el+ln-1)
return rng
def create_kpath(Nk):
G2K = (0.4444444444444444 + 0.1111111111111111) ** 0.5
K2M = ((0.6666666666666666 - 0.5) ** 2 + (0.3333333333333333 - 0.5) ** 2) ** 0.5
M2G = (0.25 + 0.25) ** 0.5
Kdist = G2K + K2M + M2G
NG2K = int(Nk / Kdist * G2K)
NK2M = int(Nk / Kdist * K2M)
NM2G = int(Nk / Kdist * M2G)
def from_to(N, f, t):
full = np.empty([N, 3])
ls = np.linspace(0, 1, N, endpoint=False)
for i in range(3):
full[:, i] = f[i] + (t[i] - f[i]) * ls
return full
kG2K = from_to(NG2K, [0.0, 0.0, 0.0], [0.6666666666666666, 0.3333333333333333, 0])
kK2M = from_to(NK2M, [0.6666666666666666, 0.3333333333333333, 0], [0.5, 0.5, 0.0])
kM2G = from_to(NM2G, [0.5, 0.5, 0.0], [0.0, 0.0, 0.0])
xtick = [0, NG2K - 1, NG2K + NK2M - 1, NG2K + NK2M + NM2G - 1]
label = ['G', 'K', 'M', 'G']
return ([xtick, label], np.vstack((kG2K, kK2M, kM2G)))
def plot_bandstructure(H, Nk, ymin=None, ymax=None, style='.',
color='k', label=None):
if type(H) is str:
H = si.get_sile(H).read_hamiltonian()
ticks, k = create_kpath(Nk)
eigs = np.empty([len(k), H.no], np.float64)
for ik, k in enumerate(k):
print('{} / {}'.format(ik+1, Nk), end='\r')
eigs[ik, :] = H.eigh(k=k, eigvals_only=True)
ax = plt.gca()
for n in range(H.no):
print('{} / {}'.format(n+1, H.no), end='\r')
ax.plot(eigs[:, n], style, color=color, label=label if n == 0 else "")
ax.xaxis.set_ticks(ticks[0])
ax.set_xticklabels(ticks[1])
if ymin is None:
ymin = ax.get_ylim()[0]
if ymax is None:
ymax = ax.get_ylim()[1]
ax.set_ylim(ymin, ymax)
for tick in ticks[0]:
ax.plot([tick, tick], [ymin, ymax], 'k')
return ax
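# Usage sketch (hypothetical TSHS file): bands along G-K-M-G with 200 k-points
# ax = plot_bandstructure('GR.TSHS', 200, ymin=-3, ymax=3)
# plt.savefig('bands.pdf')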
def list2colors(inp, colormap, vmin=None, vmax=None):
norm = plt.Normalize(vmin, vmax)
return colormap(norm(inp))
def get_dft_param(tshs, ia, iio, jjo, unique=False, onlynnz=False, idx=None):
""" Read Hamiltonian and get coupling constants between
'iio'-th orbital of atom 'ia' and 'jjo'-th orbital of all other atoms
"""
# Read Hamiltonian
if isinstance(tshs, str):
tshs = si.get_sile(tshs).read_hamiltonian()
HS = tshs.copy()
# Index of iio-th orbital of ia-th atom
io = HS.a2o(ia) + iio
# Coupling elements (all orbitals)
edges = HS.edges(orbital=io, exclude=-1)
# Remove non-jjo connections
# convert to atoms (only unique values)
edges = HS.o2a(edges, unique=True)
if idx is not None:
mask = np.in1d(edges, idx)
edges = edges[mask]
# backconvert to the jjo'th orbital on the connecting atoms
edges = HS.a2o(edges) + jjo
r = HS.orij(io, edges)
couplings = HS[io, edges]
# Sort according to r
idx_sorted = np.argsort(r)
r = r[idx_sorted]
couplings = couplings[idx_sorted, :]
if unique:
idx_uniq, cnt_uniq = np.unique(r.round(decimals=2), return_index=True, return_counts=True)[1:]
r = r[idx_uniq]
couplings = np.array([np.average(couplings[iu:(iu+cu), :], axis=0) for iu,cu in zip(idx_uniq, cnt_uniq)])
return r, couplings
def get_R_hop(tshs, tbt, xyz_tip, pzidx, nn, z_gr=None, return_S=False):
a_dev = tbt.a_dev
tshs_dev = tshs.sub(a_dev)
if z_gr == None:
z_gr = tshs_dev.xyz[0, 2]
C_list = (tshs_dev.xyz[:, 2] == z_gr).nonzero()[0]
# Check that we have selected only carbon atoms
for ia, a in zip(C_list, tshs_dev.atom[C_list]):
if a.Z != 6:
print('WARNING: Some atoms are not carbons in the graphene plane: {} {}'.format(ia, tshs_dev.xyz[ia]))
# Get distances of all C atoms from tip (x,y) position
# (notice that tshs_dev.xyz = tshs.xyz, so we need to use xyz_tip wrt full geom)
#xyz_tip_dev = xyz_tip - tshs_dev.xyz[0]
#xyz_tip_dev[2] = tshs_dev.xyz[0, 2]
_, distance = tshs_dev.geom.close_sc(xyz_tip, R=np.inf, idx=C_list, ret_rij=True)
# Get onsite and couplings for each of the atoms, up to the 3rd nn
hoppings = np.empty((len(distance), nn+1))
if return_S:
overlaps = np.empty((len(distance), nn+1))
for ia in C_list:
# Extracting only pz-projected parameters from TSHS of graphene with tip
_, tmp = get_dft_param(tshs_dev, ia, pzidx, pzidx, unique=True, onlynnz=True, idx=C_list)
for i in range(nn+1):
hoppings[ia, i] = tmp[i][0]
if return_S:
overlaps[ia, i] = tmp[i][1]
# Write sorted data for future usage
isort = np.argsort(distance)
si.io.TableSile('couplings.txt', 'w').write_data(distance[isort], *hoppings[isort].T)
if return_S:
return distance[isort], hoppings[isort].T, overlaps[isort].T
return distance[isort], hoppings[isort].T
def plot_couplings_dft2tb(tshs_pristine, tshs, tbt, xyz_tip, pzidx=2, figname='dH.pdf'):
"""
Compare onsite and couplings of pristine graphene with those of a
dirty graphene system.
Plots both raw data and relative difference.
#
# param0[i][j]
# i=0: on-site
# i=1: 1nn coupling
# i=2: 2nn coupling
# i=3: 3nn coupling
# j=0 : Hamiltonian matrix
# j=1 : Overlap matrix
Example:
import sisl as si
from tbtncTools import plot_couplings_dft2tb
tshs_pristine = si.get_sile('../../pristine_300kpt/GR.TSHS').read_hamiltonian()
tshs = si.get_sile('../../tip_atop_szp/z1.8/GR.TSHS').read_hamiltonian()
tbt = si.get_sile('../../tip_atop_szp/z1.8/siesta.TBT.nc')
xyz_tip = tshs.xyz[-1, :]
plot_couplings_dft2tb(tshs_pristine, tshs, tbt, xyz_tip, pzidx=2, figname='dH.pdf')
"""
# Plot reference lines for well converged pristine graphene system
fig = plt.figure()
ax = fig.add_subplot(111)
# Extracting only pz-projected parameters from TSHS of perfect graphene
_, param0 = get_dft_param(tshs_pristine, 0, pzidx, pzidx, unique=True, onlynnz=True)
# Plot
ax.axhline(y=param0[0][0], label='On-site', c='k', ls='-')
ax.axhline(y=param0[1][0], label='1nn coupling', c='g', ls='-')
ax.axhline(y=param0[2][0], label='2nn coupling', c='r', ls='-')
ax.axhline(y=param0[3][0], label='3nn coupling', c='b', ls='-')
# Plot onsite and couplings for well converged "dirty" graphene system
distance, param = get_R_hop(tshs, tbt, xyz_tip, pzidx)
# Plot
ax.scatter(distance, param[0], label='On-site (tip)', c='k')#, ls='--')
ax.scatter(distance, param[1], label='1nn coupling (tip)', c='g')#, ls='--')
ax.scatter(distance, param[2], label='2nn coupling (tip)', c='r')#, ls='--')
ax.scatter(distance, param[3], label='3nn coupling (tip)', c='b')#, ls='--')
# Mark the distance between the tip (x,y) and the closest distance from outmost frame atoms
rM01 = np.absolute(np.amax(tshs.xyz[:, 0]) - xyz_tip[0])
rM02 = np.absolute(np.amin(tshs.xyz[:, 0]) - xyz_tip[0])
rM11 = np.absolute(np.amax(tshs.xyz[:, 1]) - xyz_tip[1])
rM12 = np.absolute(np.amin(tshs.xyz[:, 1]) - xyz_tip[1])
rM = np.amin([rM01, rM02, rM11, rM12])
ax.axvline(x=rM, c='k', ls='--')
# General plot settings
plt.xlim(0., np.amax(distance))
ax.set_xlabel('$r-r_{\mathrm{tip}}\,(\AA)$')
ax.set_ylabel('E (eV)')
plt.legend(loc=4, fontsize=10, ncol=2)
plt.tight_layout()
for o in fig.findobj():
o.set_clip_on(False)
plt.savefig(figname)
# Plot relative difference
f, axes = plt.subplots(4, sharex=True)
f.subplots_adjust(hspace=0)
axes[0].scatter(distance, param[0]-np.full(len(distance), param0[0][0]),
label='On-site', c='k')
axes[1].scatter(distance, param[1]-np.full(len(distance), param0[1][0]),
label='1nn coupling', c='g')
axes[2].scatter(distance, param[2]-np.full(len(distance), param0[2][0]),
label='2nn coupling', c='r')
axes[3].scatter(distance, param[3]-np.full(len(distance), param0[3][0]),
label='3nn coupling', c='b')
# Mark the distance between the tip (x,y) and the closest distance from outmost frame atoms
for a in axes:
a.axhline(y=0., c='lightgrey', ls='-')
a.axvline(x=rM, c='k', ls='--')
#a.autoscale()
a.set_xlim(0., np.amax(distance))
a.set_ylim(a.get_ylim()[0], 0.)
a.yaxis.set_major_locator(plt.MaxNLocator(3))
# General plot settings
axes[-1].set_xlabel('$r-r_{\mathrm{tip}}\,(\AA)$')
f.text(0.025, 0.5, '$\Delta E $ (eV)', ha="center", va="center", rotation=90)
#for o in f.findobj():
# o.set_clip_on(False)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
plt.savefig('diff_'+figname)
def sc_xyz_shift(geom, axis):
return (geom.cell[axis,axis] - (np.amax(geom.xyz[:,axis]) - np.amin(geom.xyz[:,axis])))/2
#def Delta(TSHS, HS_TB, shape='Cuboid', z_graphene=None, ext_offset=None, center=None,
def Delta(TSHS, shape='Cuboid', z_graphene=None, ext_offset=None, center=None,
thickness=None, zaxis=2, atoms=None, segment_dir=None):
# z coordinate of graphene plane
if z_graphene is None:
print('\n\nPlease provide a value for z_graphene in Delta routine')
exit(1)
# Center of shape in TSHS
if center is None:
center = TSHS.center(atom=(TSHS.xyz[:,zaxis] == z_graphene).nonzero()[0])
center = np.asarray(center)
# Thickness in Ang
if thickness is None:
thickness = 6. # Ang
#thickness = HS_TB.maxR()+0.01
thickness = np.asarray(thickness, np.float64)
    # Cuboid or Ellipsoid?
if zaxis == 2:
size = .5*np.diagonal(TSHS.cell) + [0,0,300] # default radius is half the cell size
elif zaxis == 0:
size = .5*np.diagonal(TSHS.cell) + [300,0,0] # default radius is half the cell size
elif zaxis == 1:
size = .5*np.diagonal(TSHS.cell) + [0,300,0] # default radius is half the cell size
if shape == 'Ellipsoid' or shape == 'Sphere':
mkshape = si.shape.Ellipsoid
elif shape == 'Cuboid' or shape == 'Cube':
mkshape = si.shape.Cuboid
# In this case it's the full perimeter so we double
size *= 2
thickness *= 2
if ext_offset is not None:
ext_offset = np.asarray(ext_offset, np.float64).copy()
ext_offset *= 2
elif shape == 'Segment':
mkshape = si.shape.Cuboid
# In this case it's the full perimeter so we double
size *= 2
area_tot = mkshape(size, center=TSHS.center(atom=(TSHS.xyz[:,zaxis] == z_graphene).nonzero()[0]))
size[segment_dir] = thickness
if ext_offset is not None:
ext_offset = np.asarray(ext_offset, np.float64).copy()
else:
print('\n shape = "{}" is not implemented...'.format(shape))
exit(1)
if shape == 'Segment': # ADD COMPLEMENTARY AREA...
# Areas
Delta = mkshape(size, center=center)
# Atoms within Delta and complementary area
a_Delta = Delta.within_index(TSHS.xyz)
if atoms is not None:
a_Delta = a_Delta[np.in1d(a_Delta, atoms)]
# Check
v = TSHS.geom.copy(); v.atom[a_Delta] = si.Atom(8, R=[1.43]); v.write('a_Delta.xyz')
return a_Delta, Delta
else:
# External boundary
area_ext = mkshape(size, center=center)
# Adjust with ext_offset if necessary
if ext_offset is not None:
ext_offset = np.asarray(ext_offset, np.float64)
area_ext = area_ext.expand(-ext_offset)
# Force it to be Cube or Sphere (side = ext_offset) if necessary
if shape == 'Sphere' or shape == 'Cube':
if len(ext_offset.nonzero()[0]) > 1:
print('Offset is in both axes. Please set "shape" to Cuboid or Ellipsoid')
exit(1)
axis = ext_offset.nonzero()[0][0]
print('Offset is non-zero along axis: {}...complementary is {}'.format(axis, int(axis<1)))
new_ext_offset = np.zeros(3); new_ext_offset[int(axis<1)] = ext_offset[axis]
area_ext = area_ext.expand(-new_ext_offset)
#a_ext = area_ext.within_index(TSHS.xyz)
# Internal boundary
area_int = area_ext.expand(-thickness)
        # Disjunction composite shape (the frame between outer and inner boundaries)
Delta = area_ext - area_int
# Atoms within Delta and internal boundary
a_Delta = Delta.within_index(TSHS.xyz)
a_int = area_int.within_index(TSHS.xyz)
if atoms is not None:
a_Delta = a_Delta[np.in1d(a_Delta, atoms)]
# Check
v = TSHS.geom.copy(); v.atom[a_Delta] = si.Atom(8, R=[1.43]); v.write('a_Delta.xyz')
return a_Delta, a_int, Delta, area_ext, area_int
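# Minimal usage sketch for Delta() (file name and z value are assumptions, not shipped files).
# For the default 'Cuboid' shape it returns the atoms lying in a frame of given thickness
# at the cell boundary, the atoms enclosed by the frame, and the sisl shape objects used.
def _example_Delta():
    TSHS = si.get_sile('GR.TSHS').read_hamiltonian()
    z_gr = TSHS.xyz[0, 2]  # assume atom 0 lies in the graphene plane
    a_Delta, a_int, D, area_ext, area_int = Delta(TSHS, shape='Cuboid',
                                                  z_graphene=z_gr, thickness=6.)
    return a_Delta, a_int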
def makeTB(TSHS_0, pzidx, nn, WW, LL, elec=None, save=True, return_bands=False):
"""
TSHS_0: tbtncSile object from "pristine graphene" reference calculation
pzidx: index of pz orbitals in the basis set used to create 'TSHS_0'
nn: no. of neighbours to be used in the TB model
    WW: width of TB geometry (Angstrom) - transverse direction (axis 0)
    LL: length of TB geometry (Angstrom) - transport direction (axis 1)
elec: tbtncSile object from electrode calculation
"""
########################## From PERFECT graphene reference TSHS
dR = 0.005
# Check
for a in TSHS_0.atom.atom:
if a.Z != 6:
            print('ERROR: cannot build TB model because the provided geometry\n\tis not a pristine graphene')
exit(1)
# Extracting only pz-projected parameters from TSHS of perfect graphene
r, param = get_dft_param(TSHS_0, 0, pzidx, pzidx, unique=True, onlynnz=True)
print('\nEffective no. of neighbors per atom from TSHS_0: {}'.format(len(r)-1))
print('\nr ({}; Angstrom)\t param ({}; eV):'.format(len(r), len(param)))
for ri, ci in zip(r, param):
print('{:.5f} \t '.format(ri), ci)
def get_graphene_H(radii, param, dR=dR):
# In order to get the correct radii of the orbitals it is
# best to define them explicitly.
# This enables one to "optimize" the number of supercells
# subsequently.
# Define the radii of the orbital to be the maximum
C = si.Atom(6, R=radii[-1] + dR)
# Define graphene
g = si.geom.graphene(radii[1], C, orthogonal=True)
g.optimize_nsc()
# Now create Hamiltonian
H = si.Hamiltonian(g, orthogonal=False)
# Define primitive also for check of bandstructure
g_s = si.geom.graphene(radii[1], C)
g_s.optimize_nsc()
H_s = si.Hamiltonian(g_s, orthogonal=False)
if len(param.shape) == 1:
# Create a new fake parameter
# with overlap elements
new_param = np.zeros([len(param), 2], dtype=np.float64)
new_param[:, 0] = param
new_param[0, 1] = 1. # on-site, everything else, zero
param = new_param
H.construct((radii+dR, param))
H_s.construct((radii+dR, param))
return H, H_s
# Setup the Hamiltonian building block
    if nn == 'all':
print('WARNING: you are retaining ALL interactions from DFT model')
H0, H0_s = get_graphene_H(r, param)
else:
print('WARNING: you are retaining only interactions up to {} neighbours'.format(nn))
H0, H0_s = get_graphene_H(r[:nn+1], param[:nn+1])
print('\nBuilding block for TB model:\n', H0)
# Setup TB model
W, L = int(round(WW/H0.cell[0,0])), int(round(LL/H0.cell[1,1]))
# ELECTRODE
if elec is not None:
n_el = int(round(elec.cell[1,1]/H0.cell[1,1]))
else:
n_el = 2
HS_elec = H0.tile(W, 0).tile(n_el, 1)
HS_elec.write('HS_ELEC.nc')
HS_elec.geom.write('HS_ELEC.fdf')
HS_elec.geom.write('HS_ELEC.xyz')
# DEVICE + ELECTRODES (to be written ONLY after selection and rearranging of GF/dSE area)
HS_dev = H0.tile(W, 0).tile(L, 1)
if save:
HS_dev.write('HS_DEV_0.nc')
HS_dev.geom.write('HS_DEV_0.fdf')
HS_dev.geom.write('HS_DEV_0.xyz')
# Check bands with primitive cell
if return_bands:
# Open figure outside and bands will automatically be added to the plot
plot_bandstructure(H0_s, 400, ymin=-3, ymax=3,
style='-', color='k', label='Pristine $p_z$ parameters')
return HS_dev
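# Minimal usage sketch for makeTB (paths and sizes are assumptions): build an orthogonal
# graphene TB device of ~50x100 Ang from the pz-projected parameters of a pristine DFT
# calculation, retaining interactions up to the 3rd nearest neighbour.
def _example_makeTB():
    TSHS_0 = si.get_sile('pristine/GR.TSHS').read_hamiltonian()
    return makeTB(TSHS_0, pzidx=2, nn=3, WW=50., LL=100., save=False)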
def makeTB_FrameOutside(tshs, tbt, center, TSHS_0, pzidx, nn, WW, LL,
elec=None, save=True, return_bands=False, z_graphene=None):
"""
tshs: TSHS object from "dirty graphene" calculation
tbt: tbtncSile object from tbtrans calculation with HS: "tshs"
TSHS_0: TSHS object from "pristine graphene" reference calculation
pzidx: index of pz orbitals in the basis set used to create 'TSHS_0'
nn: no. of neighbours to be used in the TB model
WW: width of TB geometry (Angstrom) - transverse direction: 0 -
LL: length of TB geometry (Angstrom) - transport direction: 1 -
    elec: tbtncSile object from electrode calculation
save: True will store device region netcdf files for usage in tbtrans
"""
########################## From PERFECT graphene reference TSHS
dR = 0.005
# Check that TSHS_0 has only carbon atoms
for a in TSHS_0.atom.atom:
if a.Z != 6:
print('ERROR: cannot build TB model because the provided geometry\n\tis not a pristine graphene')
exit(1)
# Extracting only pz-projected parameters from TSHS of perfect graphene
r, param = get_dft_param(TSHS_0, 0, pzidx, pzidx, unique=True, onlynnz=True)
print('\nEffective no. of neighbors per atom from TSHS_0: {}'.format(len(r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(r), len(param)))
for ri, ci in zip(r, param):
print('{:.5f} \t '.format(ri), ci)
# Setup the Hamiltonian building block
    if nn == 'all':
nn = len(r)-1
# The reference values we wish to target (pristine graphene)
ref_r, ref_hop, ref_over = r[:nn+1], param[:nn+1, 0], param[:nn+1, 1]
print('Targeted no. of neighbors per atom from TSHS_0: {}'.format(len(ref_r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(ref_r), len(ref_hop)))
for ri, ci, oi in zip(ref_r, ref_hop, ref_over):
print('{:.5f} \t '.format(ri), ci, oi)
# R and hopping from tshs, center is the coordinates of the tip apex
# This works Only if the frame is the outmost atoms in tbt.a_dev
# Maybe it's better to define a shape here!
if z_graphene is None:
print('\n\nPlease provide a value for z_graphene')
exit(1)
if center is None:
center = tshs.center(atom=(tshs.xyz[:,2] == z_graphene).nonzero()[0])
print('makeTB: you are considering this as center: {}'.format(center))
distances, hop = get_R_hop(tshs, tbt, center, pzidx, nn, z_gr=z_graphene)
hop_atframe = [np.average(hop[i, np.arange(-10, 0)]) for i in range(nn+1)]
# r's to plot
r2plot = np.linspace(0, np.amax(distances), 1000)
f, ax = plt.subplots(nn+1, sharex=True)
for i in range(nn+1):
ax[i].scatter(distances, hop[i, :])
# Plot lines
ax[i].plot([r2plot.min(), r2plot.max()], [ref_hop[i], ref_hop[i]], '--')
ymin = np.amin([ref_hop[i], hop_atframe[i]]) - 0.1
ymax = np.amax([ref_hop[i], hop_atframe[i]]) + 0.1
ax[i].set_ylim(ymin, ymax)
ax[i].set_xlim(r2plot.min(), r2plot.max())
f.savefig('shifting_data.pdf')
plt.close(f)
###### Create device Hamiltonian
bond = ref_r[1] # to make it fit in a smaller unit-cell
C = si.Atom(6, R=ref_r[-1] + dR)
g0 = si.geom.graphene(bond, C, orthogonal=True)
g0.optimize_nsc()
H0 = si.Hamiltonian(g0, orthogonal=False)
print('\nNo. of neighbors per atom: {}'.format(len(ref_r)-1))
print('r ({}; Angstrom)\t Final parameters from frame ({}; eV):'.format(len(ref_r), len(hop_atframe)))
for ri, ci, oi in zip(ref_r, hop_atframe, ref_over):
print('{:.5f} \t '.format(ri), ci, oi)
# Construct TB. onsite is the same as tip tshs, while couplings are the same as pristine
H0.construct((ref_r+dR, zip(hop_atframe, ref_over)), eta=True)
# DEVICE + ELECTRODES geometry
# Width and length of device
W, L = int(round(WW/g0.cell[0,0])), int(round(LL/g0.cell[1,1]))
print('Device is {} x {} supercell of the unit orthogonal cell'.format(W, L))
# (nc files should be written ONLY after selection and rearranging of GF/dSE area)
HS_dev = H0.tile(W, 0).tile(L, 1)
if save:
HS_dev.write('HS_DEV.nc')
HS_dev.geom.write('HS_DEV.fdf')
HS_dev.geom.write('HS_DEV.xyz')
# ELECTRODE
if elec is not None:
n_el = int(round(elec.cell[1,1]/H0.cell[1,1]))
else:
n_el = 2
HS_elec = H0.tile(W, 0).tile(n_el, 1)
HS_elec.write('HS_ELEC.nc')
HS_elec.geom.write('HS_ELEC.fdf')
HS_elec.geom.write('HS_ELEC.xyz')
# Check bands with primitive cell
if return_bands:
g0_s = si.geom.graphene(bond, C)
g0_s.optimize_nsc()
H0_s = si.Hamiltonian(g0_s, orthogonal=False)
H0_s.construct((ref_r+dR, zip(hop_atframe, ref_over)))
# Open figure outside and bands will automatically be added to the plot
plot_bandstructure(H0_s, 400, ymin=-3, ymax=3,
style='--', color='r', label='Pristine w/ tip $p_z$ onsite')
return HS_dev
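# Usage sketch for makeTB_FrameOutside (paths are assumptions): on-site and hoppings are
# averaged over the outermost frame atoms of the "dirty" calculation, while overlaps come
# from the pristine reference; center=None lets the routine pick the graphene-plane center.
def _example_makeTB_FrameOutside():
    tshs = si.get_sile('tip/GR.TSHS').read_hamiltonian()
    tbt = si.get_sile('tip/siesta.TBT.nc')
    TSHS_0 = si.get_sile('pristine/GR.TSHS').read_hamiltonian()
    z_gr = tshs.xyz[0, 2]  # assume atom 0 lies in the graphene plane
    return makeTB_FrameOutside(tshs, tbt, None, TSHS_0, pzidx=2, nn=3,
                               WW=50., LL=100., save=False, z_graphene=z_gr)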
def interp1d(x, y, y0, y1):
""" Create an interpolation function from x, y.
The resulting function has these properties:
x < x.min():
f(x) = y0
x.min() < x < x.max():
f(x) = y
x.max() < x:
f(x) = y1
"""
return sp.interpolate.interp1d(x, y, bounds_error=False,
fill_value=(y0, y1))
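# Quick sketch of the clamping behaviour of interp1d: outside [x.min(), x.max()] the
# returned function gives the constant boundary values y0 / y1 instead of extrapolating.
def _example_interp1d():
    x = np.linspace(1., 5., 20)
    y = x**2
    f = interp1d(x, y, y0=0., y1=30.)
    return f(0.), f(3.), f(10.)  # -> (0.0, ~9.0, 30.0)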
def func_smooth_fermi(x, y, first_x, second_x, y1, delta=8):
""" Return an interpolation function with the following properties:
x < first_x:
f(x) = y(first_x)
first_x < x < second_x:
f(x) = y
second_x < x
f(x) = y1
`delta` determines the amount of the smearing width that is between `first_x` and
`second_x`.
Parameters
----------
x, y : numpy.ndarray
x/y-data points
first_x : float
the point of cut-off for the x-values. In this approximation we assume
the `y` data-points has a plateau in the neighbourhood of `first_x`
second_x : float
above this `x` value all values will be `y1`.
y1 : float
second boundary value
delta : float, optional
amount of smearing parameter in between `first_x` and `second_x` (should not be below 6!).
"""
    if first_x < np.amax(x):
        raise ValueError("first_x has to be larger than the maximum interpolation x value")
    # Estimate the first plateau value from the data points within 3 Ang of the largest x
    idx = (np.amax(x) - x < 3.).nonzero()[0]
y0 = np.average(y[idx])
# We already have the second plateau.
# So all we have to do is calculate the smearing
# to capture the smoothing range
mid_x = (first_x + second_x) / 2
sigma = (second_x - first_x) / delta
if y0 < y1:
sigma = - sigma
b = y0
else:
b = y1
    # Now we can create the function
    dd = delta / 2. + 1.
    # Calculate the Fermi-like tail on a fine grid beyond first_x (0.01 Ang precision)
    xff = np.arange(first_x, second_x + 2 * dd, 0.01)
    yff = abs(y1 - y0) / (np.exp((xff - mid_x) / sigma) + 1) + b
return interp1d(np.append(x, xff), np.append(y, yff), y[0], y1)
def func_smooth_linear(x, y):
return sp.interpolate.interp1d(x, y, kind='cubic', fill_value=(y[0], y[-1]), bounds_error=False)
def func_smooth(x, y, first_x=None, second_x=None, y1=None, delta=8, what='linear'):
if what is None:
what = 'linear'
if what == 'fermi':
return func_smooth_fermi(x, y, first_x, second_x, y1, delta)
elif what == 'linear':
return func_smooth_linear(x, y)
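# Sketch of func_smooth on synthetic data (values are only illustrative): the default
# 'linear' mode returns a cubic interpolator clamped to the end values, while 'fermi'
# smoothly connects the data plateau to the asymptotic value y1 between first_x and second_x.
def _example_func_smooth():
    x = np.linspace(0., 20., 50)
    y = -2.7 + 0.05*np.exp(-x)
    f = func_smooth(x, y)  # cubic interpolation of the raw data
    g = func_smooth(x, y, first_x=25., second_x=40., y1=-2.7, what='fermi')
    return f(10.), g(50.)  # g(50.) -> -2.7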
def makeTB_InterpFrame(tshs, tbt, xyz_tip, TSHS_0, pzidx, nn, WW, LL,
elec=None, save=True, return_bands=False, avg=False):
"""
tshs: TSHS object from "dirty graphene" calculation
tbt: tbtncSile object from tbtrans calculation with HS: "tshs"
TSHS_0: TSHS object from "pristine graphene" reference calculation
pzidx: index of pz orbitals in the basis set used to create 'TSHS_0'
nn: no. of neighbours to be used in the TB model
WW: width of TB geometry (Angstrom) - transverse direction: 0 -
LL: length of TB geometry (Angstrom) - transport direction: 1 -
    elec: tbtncSile object from electrode calculation
save: True will store device region netcdf files for usage in tbtrans
"""
########################## From PERFECT graphene reference TSHS
dR = 0.005
# Check that TSHS_0 has only carbon atoms
for a in TSHS_0.atom.atom:
if a.Z != 6:
print('ERROR: cannot build TB model because the provided geometry\n\tis not a pristine graphene')
exit(1)
# Extracting only pz-projected parameters from TSHS of perfect graphene
r, param = get_dft_param(TSHS_0, 0, pzidx, pzidx, unique=True, onlynnz=True)
print('\nEffective no. of neighbors per atom from TSHS_0: {}'.format(len(r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(r), len(param)))
for ri, ci in zip(r, param):
print('{:.5f} \t '.format(ri), ci)
# Setup the Hamiltonian building block
    if nn == 'all':
nn = len(r)-1
# The reference values we wish to target (pristine graphene)
ref_r, ref_hop, ref_over = r[:nn+1], param[:nn+1, 0], param[:nn+1, 1]
print('Targeted no. of neighbors per atom from TSHS_0: {}'.format(len(ref_r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(ref_r), len(ref_hop)))
for ri, ci, oi in zip(ref_r, ref_hop, ref_over):
print('{:.5f} \t '.format(ri), ci, oi)
# Get distance from tip and relative hoppings, sorted
distances, hop = get_R_hop(tshs, tbt, xyz_tip, pzidx, nn)
if avg:
hop_atframe = [np.average(hop[i, np.arange(-10, 0)]) for i in range(nn+1)]
else:
fit = [func_smooth(distances, hop[i, :]) for i in range(nn+1)]
# r's to plot
r2plot = np.linspace(0, 1.2*distances[-1], 1000)
f, ax = plt.subplots(nn+1, sharex=True)
for i in range(nn+1):
ax[i].scatter(distances, hop[i, :])
ax[i].plot(r2plot, fit[i](r2plot))
# Plot lines
#ax[i].plot([r2plot.min(), r2plot.max()], [ref_hop[i], ref_hop[i]], '--')
#ymin = np.amin([ref_hop[i], fit[i](distances[-1])]) - 0.1
#ymax = np.amax([ref_hop[i], fit[i](distances[-1])]) + 0.1
#ax[i].plot([distances[-1], distances[-1]], [ymin, ymax], '--')
#ax[i].set_ylim(ymin, ymax)
ax[i].set_xlim(r2plot.min(), r2plot.max())
f.savefig('fit_data.pdf')
plt.close(f)
    # NOTE: the remainder of this routine is unfinished (it still references an undefined
    # tip position c_xyz); stop here explicitly instead of failing with a NameError.
    raise NotImplementedError('makeTB_InterpFrame is incomplete beyond this point')
###### Create device Hamiltonian using the correct parameters
bond = ref_r[1] # to make it fit in a smaller unit-cell
C = si.Atom(6, R=ref_r[-1] + dR)
g0 = si.geom.graphene(bond, C, orthogonal=True)
g0.optimize_nsc()
# Width and length of device
W, L = int(round(WW/g0.cell[0,0])), int(round(LL/g0.cell[1,1]))
# DEVICE + ELECTRODES geometry (without PBC!!!)
# (nc files should be written ONLY after selection and rearranging of GF/dSE area)
g = g0.tile(W, 0).tile(L, 1)
g.set_nsc([1] *3)
HS_dev = si.Hamiltonian(g, orthogonal=False)
# Create the connectivity values
Hc = [np.empty(len(g)) for i in range(nn+1)]
# # Get tip (x,y) position in large TB
# frameOrigin_xyz = g.xyz[frameOrigin-TSHS_elec.na]
# print('Frame reference (x, y, z=z_graphene) coordinates (low-left) in large TB geometry are:\n\t{}'.format(frameOrigin_xyz))
# c_xyz = frameOrigin_xyz + xyz_tip
# c_xyz[2] = frameOrigin_xyz[2]
# print('Tip (x, y, z=z_graphene) coordinates in large TB geometry are:\n\t{}'.format(c_xyz))
# c_xyz = c_xyz.reshape(1, 3)
# Now loop and construct the Hamiltonian
def func(self, ia, idxs, idxs_xyz=None):
idx_a, xyz_a = self.geom.close(ia, R=ref_r+dR, idx=idxs,
idx_xyz=idxs_xyz, ret_xyz=True)
# Calculate distance to center
# on-site does not need averaging
rr = np.sqrt(np.square(xyz_a[0] - c_xyz).sum(1))
f = fit[0](rr)
self[ia, idx_a[0], 0] = f
self[ia, idx_a[0], 1] = ref_over[0]
Hc[0][ia] = np.average(f)
xyz = g.xyz[ia, :].reshape(1, 3)
for i in range(1, len(idx_a)):
rr = np.sqrt(np.square((xyz_a[i] + xyz)/2 - c_xyz).sum(1))
f = fit[i](rr)
self[ia, idx_a[i], 0] = f
self[ia, idx_a[i], 1] = ref_over[i]
Hc[i][ia] = np.average(f)
HS_dev.construct(func, eta=True)
# Extract at Gamma for plot
Hk = HS_dev.tocsr(0)
# Check for Hermiticity
if np.abs(Hk - Hk.T).max() != 0.:
        print('ERROR: Hamiltonian is NOT HERMITIAN!')
exit(0)
# Plot onsite and coupling maps
cm = plt.cm.get_cmap('RdYlBu')
x = HS_dev.xyz[:, 0]
y = HS_dev.xyz[:, 1]
for i in range(nn+1):
plt.figure()
z = Hc[i]
sc = plt.scatter(x, y, c=abs(z), edgecolor='none', cmap=cm)
plt.colorbar(sc)
plt.savefig('fermifit_{}.png'.format(i), dpi=300)
if save:
HS_dev.write('HS_DEV.nc')
HS_dev.geom.write('HS_DEV.fdf')
HS_dev.geom.write('HS_DEV.xyz')
# ELECTRODE
    if elec is not None:
        n_el = int(round(elec.cell[1,1]/g0.cell[1,1]))
    else:
        n_el = 2
H0 = si.Hamiltonian(g0, orthogonal=False)
H0.construct((ref_r+dR, zip(ref_hop, ref_over)))
HS_elec = H0.tile(W, 0).tile(n_el, 1)
HS_elec.write('HS_ELEC.nc')
HS_elec.geom.write('HS_ELEC.fdf')
HS_elec.geom.write('HS_ELEC.xyz')
# Check bands with primitive cell
if return_bands:
g0_s = si.geom.graphene(bond, C)
g0_s.optimize_nsc()
H0_s = si.Hamiltonian(g0_s, orthogonal=False)
H0_s.construct((ref_r+dR, zip(ref_hop, ref_over)))
# Open figure outside and bands will automatically be added to the plot
plot_bandstructure(H0_s, 400, ymin=-3, ymax=3,
style='-.', color='b', label='After Fermi fit')
return HS_dev
### TO FIX
def makeTB_fermi(tshs, tbt, xyz_tip, frameOrigin, TSHS_0, pzidx, nn,
WW, LL, elec, save=True, cut_R=None, smooth_R=15., return_bands=False):
"""
tshs: TSHS object from "dirty graphene" calculation
tbt: tbtncSile object from tbtrans calculation with HS: "tshs"
xyz_tip: coordinates of tip apex atom in tshs, after setting z=z_graphene
TSHS_0: TSHS object from "pristine graphene" reference calculation
pzidx: index of pz orbitals in the basis set used to create 'TSHS_0'
nn: no. of neighbours to be used in the TB model
WW: width of TB geometry (Angstrom) - transverse direction: 0 -
LL: length of TB geometry (Angstrom) - transport direction: 1 -
elec: tbtncSile object from electrode calculation
save: True will store device region netcdf files for usage in tbtrans
smooth_R: The length over which we will smooth the function (Angstrom)
"""
########################## From PERFECT graphene reference TSHS
dR = 0.005
# Check that TSHS_0 has only carbon atoms
for a in TSHS_0.atom.atom:
if a.Z != 6:
print('ERROR: cannot build TB model because the provided geometry\n\tis not a pristine graphene')
exit(1)
# Extracting only pz-projected parameters from TSHS of perfect graphene
r, param = get_dft_param(TSHS_0, 0, pzidx, pzidx, unique=True, onlynnz=True)
print('Effective no. of neighbors per atom from TSHS_0: {}'.format(len(r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(r), len(param)))
for ri, ci in zip(r, param):
print('{:.5f} \t '.format(ri), ci)
# Setup the Hamiltonian building block
    if nn == 'all':
nn = len(r)-1
# The reference values we wish to target (pristine graphene)
ref_r, ref_hop, ref_over = r[:nn+1], param[:nn+1, 0], param[:nn+1, 1]
print('Targeted no. of neighbors per atom from TSHS_0: {}'.format(len(ref_r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(ref_r), len(ref_hop)))
for ri, ci, oi in zip(ref_r, ref_hop, ref_over):
print('{:.5f} \t '.format(ri), ci, oi)
# R and hopping from tshs, xyz_tip is the coordinates of the tip apex
# This works Only if the frame is the outmost atoms in tbt.a_dev
# Maybe it's better to define a shape here!
distances, hop = get_R_hop(tshs, tbt, xyz_tip, pzidx, nn)
# Create Fermi-like function to smooth hop towards ref_hop
print(np.amax(distances))
print(cut_R)
if cut_R is None:
cut_R = np.amax(distances)
print('\nCutoff radius in TSHS: {} Ang'.format(cut_R))
fermi_fit = [func_smooth(distances, hop[i, :], cut_R, cut_R + smooth_R, ref_hop[i]) for i in range(nn+1)]
# r's to plot
r2plot = np.linspace(0, cut_R+1.2*smooth_R, 1000)
f, ax = plt.subplots(nn+1, sharex=True)
for i in range(nn+1):
ax[i].scatter(distances, hop[i, :])
ax[i].plot(r2plot, fermi_fit[i](r2plot))
# Plot lines
ax[i].plot([r2plot.min(), r2plot.max()], [ref_hop[i], ref_hop[i]], '--')
ymin = np.amin([ref_hop[i], fermi_fit[i](cut_R)]) - 0.1
ymax = np.amax([ref_hop[i], fermi_fit[i](cut_R)]) + 0.1
ax[i].plot([cut_R, cut_R], [ymin, ymax], '--')
ax[i].plot([cut_R+smooth_R, cut_R+smooth_R], [ymin, ymax], '--')
ax[i].set_ylim(ymin, ymax)
ax[i].set_xlim(r2plot.min(), r2plot.max())
f.savefig('fermifit_data.pdf')
plt.close(f)
    # NOTE: this routine is marked "TO FIX"; stop here explicitly (after saving the fit
    # plot above) instead of failing with a NameError.
    raise NotImplementedError('makeTB_fermi is not ready to be used beyond this point')
###### Create device Hamiltonian using the correct parameters
bond = ref_r[1] # to make it fit in a smaller unit-cell
C = si.Atom(6, R=ref_r[-1] + dR)
g0 = si.geom.graphene(bond, C, orthogonal=True)
g0.optimize_nsc()
# Width and length of device
W, L = int(round(WW/g0.cell[0,0])), int(round(LL/g0.cell[1,1]))
# DEVICE + ELECTRODES geometry
# (nc files should be written ONLY after selection and rearranging of GF/dSE area)
    # MAYBE NEED TO STORE THIS ONLY AFTERWARDS!!!! OR MAYBE NOT...
g = g0.tile(W, 0).tile(L, 1)
g.set_nsc([1] *3)
HS_dev = si.Hamiltonian(g, orthogonal=False)
# Create the connectivity values
Hc = [np.empty(len(g)) for i in range(nn+1)]
# Get tip (x,y) position in large TB
frameOrigin_xyz = g.xyz[frameOrigin-elec.na]
print('Frame reference (x, y, z=z_graphene) coordinates (low-left) in large TB geometry are:\n\t{}'.format(frameOrigin_xyz))
c_xyz = frameOrigin_xyz + xyz_tip
c_xyz[2] = frameOrigin_xyz[2]
print('Tip (x, y, z=z_graphene) coordinates in large TB geometry are:\n\t{}'.format(c_xyz))
c_xyz = c_xyz.reshape(1, 3)
# Now loop and construct the Hamiltonian
def func(self, ia, idxs, idxs_xyz=None):
xyz = g.xyz[ia, :].reshape(1, 3)
idx_a, xyz_a = self.geom.close(ia, R=ref_r+dR, idx=idxs, idx_xyz=idxs_xyz, ret_xyz=True)
# Calculate distance to center
# on-site does not need averaging
rr = np.sqrt(np.square(xyz_a[0] - c_xyz).sum(1))
f = fermi_fit[0](rr)
self[ia, idx_a[0], 0] = f
self[ia, idx_a[0], 1] = ref_over[0]
Hc[0][ia] = np.average(f)
for i in range(1, len(idx_a)):
rr = np.sqrt(np.square((xyz_a[i] + xyz)/2 - c_xyz).sum(1))
f = fermi_fit[i](rr)
self[ia, idx_a[i], 0] = f
self[ia, idx_a[i], 1] = ref_over[i]
Hc[i][ia] = np.average(f)
HS_dev.construct(func, eta=True)
# Extract at Gamma for plot
Hk = HS_dev.tocsr(0)
# Check for Hermiticity
if np.abs(Hk - Hk.T).max() != 0.:
        print('ERROR: Hamiltonian is NOT HERMITIAN!')
exit(0)
# Plot onsite and coupling maps
cm = plt.cm.get_cmap('RdYlBu')
x = HS_dev.xyz[:, 0]
y = HS_dev.xyz[:, 1]
for i in range(nn+1):
plt.figure()
z = Hc[i]
sc = plt.scatter(x, y, c=abs(z), edgecolor='none', cmap=cm)
plt.colorbar(sc)
plt.savefig('fermifit_{}.png'.format(i), dpi=300)
if save:
HS_dev.write('HS_DEV.nc')
HS_dev.geom.write('HS_DEV.fdf')
HS_dev.geom.write('HS_DEV.xyz')
# ELECTRODE
n_el = int(round(elec.cell[1,1]/g0.cell[1,1]))
H0 = si.Hamiltonian(g0, orthogonal=False)
H0.construct((ref_r+dR, zip(ref_hop, ref_over)))
HS_elec = H0.tile(W, 0).tile(n_el, 1)
HS_elec.write('HS_ELEC.nc')
HS_elec.geom.write('HS_ELEC.fdf')
HS_elec.geom.write('HS_ELEC.xyz')
# Check bands with primitive cell
if return_bands:
g0_s = si.geom.graphene(bond, C)
g0_s.optimize_nsc()
H0_s = si.Hamiltonian(g0_s, orthogonal=False)
H0_s.construct((ref_r+dR, zip(ref_hop, ref_over)))
# Open figure outside and bands will automatically be added to the plot
plot_bandstructure(H0_s, 400, ymin=-3, ymax=3,
style='-.', color='b', label='After Fermi fit')
return HS_dev
### NOT REALLY USEFUL
def makeTB_shifted(tshs, tbt, xyz_tip, TSHS_0, pzidx, nn, WW, LL, TSHS_elec,
save=True, shifted=True, return_bands=False):
"""
tshs: TSHS object from "dirty graphene" calculation
tbt: tbtncSile object from tbtrans calculation with HS: "tshs"
TSHS_0: TSHS object from "pristine graphene" reference calculation
pzidx: index of pz orbitals in the basis set used to create 'TSHS_0'
nn: no. of neighbours to be used in the TB model
WW: width of TB geometry (Angstrom) - transverse direction: 0 -
LL: length of TB geometry (Angstrom) - transport direction: 1 -
TSHS_elec: tbtncSile object from electrode calculation
save: True will store device region netcdf files for usage in tbtrans
"""
########################## From PERFECT graphene reference TSHS
dR = 0.005
# Check that TSHS_0 has only carbon atoms
for a in TSHS_0.atom.atom:
if a.Z != 6:
print('ERROR: cannot build TB model because the provided geometry\n\tis not a pristine graphene')
exit(1)
# Extracting only pz-projected parameters from TSHS of perfect graphene
r, param = get_dft_param(TSHS_0, 0, pzidx, pzidx, unique=True, onlynnz=True)
print('\nEffective no. of neighbors per atom from TSHS_0: {}'.format(len(r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(r), len(param)))
for ri, ci in zip(r, param):
print('{:.5f} \t '.format(ri), ci)
# Setup the Hamiltonian building block
    if nn == 'all':
nn = len(r)-1
# The reference values we wish to target (pristine graphene)
ref_r, ref_hop, ref_over = r[:nn+1], param[:nn+1, 0], param[:nn+1, 1]
print('Targeted no. of neighbors per atom from TSHS_0: {}'.format(len(ref_r)-1))
print('r ({}; Angstrom)\t param ({}; eV):'.format(len(ref_r), len(ref_hop)))
for ri, ci, oi in zip(ref_r, ref_hop, ref_over):
print('{:.5f} \t '.format(ri), ci, oi)
# R and hopping from tshs, xyz_tip is the coordinates of the tip apex
# This works Only if the frame is the outmost atoms in tbt.a_dev
# Maybe it's better to define a shape here!
distances, hop = get_R_hop(tshs, tbt, xyz_tip, pzidx, nn)
hop_atframe = [np.average(hop[i, np.arange(-10, 0)]) for i in range(nn+1)]
# r's to plot
r2plot = np.linspace(0, np.amax(distances), 1000)
f, ax = plt.subplots(nn+1, sharex=True)
for i in range(nn+1):
ax[i].scatter(distances, hop[i, :])
# Plot lines
ax[i].plot([r2plot.min(), r2plot.max()], [ref_hop[i], ref_hop[i]], '--')
ymin = np.amin([ref_hop[i], hop_atframe[i]]) - 0.1
ymax = np.amax([ref_hop[i], hop_atframe[i]]) + 0.1
ax[i].set_ylim(ymin, ymax)
ax[i].set_xlim(r2plot.min(), r2plot.max())
f.savefig('shifting_data.pdf')
plt.close(f)
###### Create device Hamiltonian using shifted on-site energy
bond = ref_r[1] # to make it fit in a smaller unit-cell
C = si.Atom(6, R=ref_r[-1] + dR)
g0 = si.geom.graphene(bond, C, orthogonal=True)
g0.optimize_nsc()
H0 = si.Hamiltonian(g0, orthogonal=False)
ref_hop_onshifted = ref_hop.copy()
if shifted:
ref_hop_onshifted[0] = hop_atframe[0]
print('\nFinal no. of neighbors per atom retained from TSHS_0: {}'.format(len(ref_r)-1))
print('r ({}; Angstrom)\t Final parameters ({}; eV):'.format(len(ref_r), len(ref_hop_onshifted)))
for ri, ci, oi in zip(ref_r, ref_hop_onshifted, ref_over):
print('{:.5f} \t '.format(ri), ci, oi)
# Construct TB. onsite is the same as tip tshs, while couplings are the same as pristine
H0.construct((ref_r+dR, zip(ref_hop_onshifted, ref_over)))
# DEVICE + ELECTRODES geometry
# Width and length of device
W, L = int(round(WW/g0.cell[0,0])), int(round(LL/g0.cell[1,1]))
# (nc files should be written ONLY after selection and rearranging of GF/dSE area)
HS_dev = H0.tile(W, 0).tile(L, 1)
if save:
HS_dev.write('HS_DEV.nc')
HS_dev.geom.write('HS_DEV.fdf')
HS_dev.geom.write('HS_DEV.xyz')
# ELECTRODE
n_el = int(round(TSHS_elec.cell[1,1]/g0.cell[1,1]))
HS_elec = H0.tile(W, 0).tile(n_el, 1)
HS_elec.write('HS_ELEC.nc')
HS_elec.geom.write('HS_ELEC.fdf')
HS_elec.geom.write('HS_ELEC.xyz')
# Check bands with primitive cell
if return_bands:
g0_s = si.geom.graphene(bond, C)
g0_s.optimize_nsc()
H0_s = si.Hamiltonian(g0_s, orthogonal=False)
H0_s.construct((ref_r+dR, zip(ref_hop_onshifted, ref_over)))
# Open figure outside and bands will automatically be added to the plot
plot_bandstructure(H0_s, 400, ymin=-3, ymax=3,
style='--', color='r', label='Pristine w/ tip $p_z$ onsite')
return HS_dev
def plot_transmission(H, iE1, iE2, ymin=None, ymax=None, style='-', color='k', label=None,
xshift=0, yshift=0, plus=None, plot=True, lw=1):
print('Plotting transmission from elec {} to elec {} in: {}'.format(iE1, iE2, H))
H = si.get_sile(H)
tr = H.transmission(H.elecs[iE1], H.elecs[iE2])
ax = plt.gca()
if not plot:
return ax, tr
if plus is not None:
ax.plot(H.E+xshift, tr+plus+yshift, style, color=color, label=label, linewidth=lw)
else:
ax.plot(H.E+xshift, tr+yshift, style, color=color, label=label, linewidth=lw)
if ymin is None:
ymin = ax.get_ylim()[0]
if ymax is None:
ymax = ax.get_ylim()[1]
ax.set_ylim(ymin, ymax)
ax.set_ylabel('Transmission')
ax.set_xlabel('$\mathrm{E-E_F}$ $(e\mathrm{V})$')
if plus is not None:
return ax, tr+plus+yshift
else:
return ax, tr+yshift
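# Usage sketch (file name is an assumption): plot_transmission draws on the current
# matplotlib axes, so several curves can be overlaid before saving the figure.
def _example_plot_transmission():
    plt.figure()
    ax, tr = plot_transmission('siesta.TBT.nc', 0, 1, color='k', label='pristine')
    ax.legend()
    plt.savefig('transmission.pdf')
    return tr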
def plot_transmission_bulk(H, iE, ymin=None, ymax=None, style='-', color='k', label=None, xshift=0, yshift=0):
print('Plotting bulk transmission from elec {} in: {}'.format(iE, H))
H = si.get_sile(H)
tr = H.transmission_bulk(H.elecs[iE])
ax = plt.gca()
ax.plot(H.E+xshift, tr+yshift, style, color=color, label=label)
if ymin is None:
ymin = ax.get_ylim()[0]
if ymax is None:
ymax = ax.get_ylim()[1]
ax.set_ylim(ymin, ymax)
ax.set_ylabel('Transmission')
ax.set_xlabel('$\mathrm{E-E_F}$ $(e\mathrm{V})$')
return ax, tr
def read_bondcurrents(f, idx_elec, only='+', E=0.0, k='avg'):#, atoms=None):
"""Read bond currents from tbtrans output
Parameters
----------
f : string
TBT.nc file
idx_elec : int
the electrode of originating electrons
only : {'+', '-', 'all'}
If "+" is supplied only the positive orbital currents are used, for "-",
only the negative orbital currents are used, else return the sum of both.
E : float or int,
A float for energy in eV, int for explicit energy index
k : bool, int or array_like
whether the returned bond current is k-averaged,
an explicit k-point or a selection of k-points
Returns
-------
bc, nc.E[idx_E], geom
bc : bond currents
nc.E[idx_E] : energy
geom : geometry
"""
print('Reading: {}'.format(f))
nc = si.get_sile(f)
na, na_dev = nc.na, nc.na_dev
print('Total number of atoms: {}'.format(na))
print('Number of atoms in the device region: {}'.format(na_dev))
geom = nc.geom
elec = nc.elecs[idx_elec]
print('Bond-currents from electrode: {}'.format(elec))
# Check 'k' argument
if k == 'avg':
avg = True
elif k == 'Gamma':
kpts = nc.kpt
idx_gamma = np.where(np.sum(np.abs(kpts), axis=1) == 0.)[0]
        if len(idx_gamma) == 0:
            print('\nThe k-point grid does not contain the Gamma point!\n')
            exit(0)
else:
print('You have selected the Gamma point!')
avg = idx_gamma # Index of Gamma point in nc.kpt
else:
print('\nInvalid `k` argument: please keep the default `avg` or use `Gamma`!\n')
exit(0)
idx_E = nc.Eindex(E)
print('Extracting bond-currents at energy: {} eV'.format(nc.E[idx_E]))
bc = nc.bond_current(elec, kavg=avg, isc=[0,0,0], only=only, E=idx_E, uc=True)
return bc, nc.E[idx_E], geom
# bc_coo = nc.bond_current(elec, kavg=avg, isc=[0,0,0], only=only, E=idx_E, uc=True).tocoo()
# i_list = bc_coo.row
# j_list = bc_coo.col
# bc_list = bc_coo.data
# #for i, j, bc in zip(i_list, j_list, bc_list):
# # print('{}\t{}\t{}'.format(i, j, bc))
# print('Number of bond-current entries: {}'.format(np.shape(bc_list)))
# if atoms is not None:
# i_list_new, j_list_new, bc_list_new = [], [], []
# for i, j, bc in zip(i_list, j_list, bc_list):
# if i in atoms and j in atoms:
# i_list_new.append(i)
# j_list_new.append(j)
# bc_list_new.append(bc)
# i_list = np.array(i_list_new)
# j_list = np.array(j_list_new)
# bc_list = np.array(bc_list_new)
# #print('i\tj\tBond-current')
# #for i, j, bc in zip(i_list, j_list, bc_list):
# # print('{}\t{}\t{}'.format(i, j, bc))
# print('MIN bc (from file) = {}'.format(np.min(bc_list)))
# print('MAX bc (from file) = {}'.format(np.max(bc_list)))
# return (geom, i_list, j_list, bc_list, nc.E[idx_E])
def bc_sub(bc, atoms):
"""
bc: bondcurrents object directly from "read_bondcurrent"
atoms: list of selected atoms
"""
    # Get data (convert to COO format only once)
    bc_coo = bc.tocoo()
    i_list, j_list, bc_list = bc_coo.row, bc_coo.col, bc_coo.data
    # Filter only selected atoms
    print('Reading bond-currents among atoms (1-based!!!):')
    print(list2range_TBTblock(atoms))  # print 0-based idx as 1-based idx
    i_list_new, j_list_new, bc_list_new = [], [], []
    for i, j, bc_val in zip(i_list, j_list, bc_list):
        if i in atoms and j in atoms:
            i_list_new.append(i)
            j_list_new.append(j)
            bc_list_new.append(bc_val)
    return np.array(i_list_new), np.array(j_list_new), np.array(bc_list_new)
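# Usage sketch (file name and atom selection are assumptions): read the k-averaged bond
# currents injected from the first electrode at E = 0 eV, then keep only a subset of atoms.
def _example_read_bondcurrents():
    bc, E0, geom = read_bondcurrents('siesta.TBT.nc', idx_elec=0, only='+', E=0.0, k='avg')
    atoms = np.arange(100)  # hypothetical 0-based selection
    i_list, j_list, bc_list = bc_sub(bc, atoms)
    return i_list, j_list, bc_list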
class Groupby:
def __init__(self, keys):
_, self.keys_as_int = np.unique(keys, return_inverse = True)
self.n_keys = max(self.keys_as_int)
self.set_indices()
def set_indices(self):
self.indices = [[] for i in range(self.n_keys+1)]
for i, k in enumerate(self.keys_as_int):
self.indices[k].append(i)
self.indices = [np.array(elt) for elt in self.indices]
def apply(self, function, vector, broadcast):
if broadcast:
result = np.zeros(len(vector))
for idx in self.indices:
result[idx] = function(vector[idx])
else:
            # one entry per group (group labels run from 0 to n_keys inclusive)
            result = np.zeros(self.n_keys + 1)
            for k, idx in enumerate(self.indices):
                result[k] = function(vector[idx])
return result
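# Sketch of Groupby: compute per-group averages of a vector, optionally broadcasting the
# result back onto the original entries (e.g. for averaging quantities per atom).
def _example_groupby():
    keys = np.array([0, 0, 1, 1, 1])
    vals = np.array([1., 3., 2., 4., 6.])
    g = Groupby(keys)
    means = g.apply(np.mean, vals, broadcast=False)     # -> [2., 4.]
    per_entry = g.apply(np.mean, vals, broadcast=True)  # -> [2., 2., 4., 4., 4.]
    return means, per_entry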
def plot_bondcurrents(f, idx_elec, only='+', E=0.0, k='avg', zaxis=2, avg=True, scale='raw', xyz_origin=None,
vmin=None, vmax=None, lw=5, log=False, adosmap=False, ADOSmin=None, ADOSmax=None, arrows=False,
lattice=False, ps=20, ados=False, atoms=None, out=None, ymin=None, ymax=None, xmin=None, xmax=None,
spsite=None, dpi=180, units='angstrom'):
""" Read bond currents from tbtrans output and plot them
Parameters
----------
f : string
TBT.nc file
idx_elec : int
the electrode of originating electrons
only : {'+', '-', 'all'}
If "+" is supplied only the positive orbital currents are used, for "-",
only the negative orbital currents are used, else return the sum of both.
E : float or int,
A float for energy in eV, int for explicit energy index
k : bool, int or array_like
whether the returned bond current is k-averaged,
an explicit k-point or a selection of k-points
zaxis : int
index of out-of plane direction
avg : bool
if "True", then it averages all currents coming from each atom and plots
them in a homogeneous map
if "False" it plots ALL bond currents as lines originating from each atom
    scale : {'raw', '%', 'radial'}
        whether values are raw, in percent of the maximum, or weighted by the distance
        from `xyz_origin`. For '%', change vmin and vmax accordingly (between 0 and 100)
    vmin : float
        min value in colormap. All data below this will be saturated to the lowest color (blue)
    vmax : float
        max value in colormap. All data above this will be saturated to the highest color (yellow)
lattice : bool
whether you want xy coord of atoms plotted as black dots in the figure
ps : float
size of these dots
spsite : list of int
special atoms in the lattice that you want to plot as red dots instead
atoms : np.array or list
list of atoms for which reading and plotting bondcurrents
out : string
name of final png figure
.....
Returns
-------
bc, nc.E[idx_E], geom
bc : bond currents
nc.E[idx_E] : energy
geom : geometry
Notes
-----
- atoms must be 0-based
- Be sure that atoms belong to a single plane (say, only graphene, no tip)
"""
t = time.time()
print('\n***** BOND-CURRENTS (2D map) *****\n')
nc = si.get_sile(f)
elec = nc.elecs[idx_elec]
# Read bond currents from TBT.nc file
bc, energy, geom = read_bondcurrents(f, idx_elec, only, E, k)
# If needed, select only selected atoms from bc_bg.
bc_coo = bc.tocoo()
i_list, j_list, bc_list = bc_coo.row, bc_coo.col, bc_coo.data
if atoms is None:
print('Reading bond-currents among all atoms in device region')
atoms = nc.a_dev
del bc_coo
else:
# Only choose atoms with positive indices
atoms = atoms[atoms >= 0]
select = np.logical_and(np.in1d(i_list, atoms), np.in1d(j_list, atoms))
i_list, j_list, bc_list = i_list[select], j_list[select], bc_list[select]
del bc_coo, select
print('Number of bond-current entries: {}'.format(np.shape(bc_list)))
print('MIN bc among selected atoms (from file) = {}'.format(np.min(bc_list)))
print('MAX bc among selected atoms (from file) = {}'.format(np.max(bc_list)))
#print('i\tj\tBond-current')
#for i, j, bc in zip(i_list, j_list, bc_list):
# print('{}\t{}\t{}'.format(i, j, bc))
# Plot
import matplotlib.collections as collections
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
cmap = cm.viridis
if out is None:
figname = 'BondCurrents_{}_E{}.png'.format(elec, energy)
else:
figname = '{}_{}_E{}.png'.format(out, elec, energy)
fig, ax = plt.subplots()
ax.set_aspect('equal')
if log:
bc_list = np.log(bc_list+1)
norm = LogNorm()
else:
norm=None
if zaxis == 2:
xaxis, yaxis = 0, 1
elif zaxis == 0:
xaxis, yaxis = 1, 2
elif zaxis == 1:
xaxis, yaxis = 0, 2
    # Fallback axis-label unit (the avg=True branch overrides this when units == 'nm')
    unitstr = '$\AA$'
    if avg:
# Plot bond currents as avg 2D map
atoms_sort = np.sort(atoms)
bc_avg = bc.sum(1).A.ravel()[atoms_sort]
        if scale == 'radial':
_, r = geom.close_sc(xyz_origin, R=np.inf, idx=atoms_sort, ret_rij=True)
bc_avg = np.multiply(bc_avg, r)
if units == 'angstrom':
unitstr = '$\AA$'
x, y = geom.xyz[atoms_sort, xaxis], geom.xyz[atoms_sort, yaxis]
a_mask = 1.54
elif units == 'nm':
unitstr = 'nm'
x, y = .1*geom.xyz[atoms_sort, xaxis], .1*geom.xyz[atoms_sort, yaxis]
a_mask = .1*1.54
        if scale == '%':
if vmin is None:
vmin = np.amin(bc_avg)*100/np.amax(bc_avg)
if vmax is None:
vmax = 100
vmin = vmin*np.amax(bc_avg)/100
vmax = vmax*np.amax(bc_avg)/100
else:
if vmin is None:
vmin = np.amin(bc_avg)
if vmax is None:
vmax = np.amax(bc_avg)
coords = np.column_stack((x, y))
img, min, max = mask_interpolate(coords, bc_avg, oversampling=30, a=a_mask)
# Note that we tell imshow to show the array created by mask_interpolate
# faithfully and not to interpolate by itself another time.
image = ax.imshow(img.T, extent=(min[0], max[0], min[1], max[1]),
origin='lower', interpolation='none', cmap='viridis',
vmin=vmin, vmax=vmax)
else:
if vmin is None:
vmin = np.min(bc_list)
if vmax is None:
vmax = np.max(bc_list)
# Plot bond currents as half-segments
start_list = zip(geom.xyz[i_list, xaxis], geom.xyz[i_list, yaxis])
half_end_list = zip(.5*(geom.xyz[i_list, xaxis]+geom.xyz[j_list, xaxis]),
.5*(geom.xyz[i_list, yaxis]+geom.xyz[j_list, yaxis]))
line_list = list(map(list, zip(start_list, half_end_list))) # segments length = 1/2 bonds length
linewidths = lw * bc_list / np.max(bc_list)
lattice_bonds = collections.LineCollection(line_list, cmap=cmap, linewidths=linewidths, norm=norm)
lattice_bonds.set_array(bc_list/np.amax(bc_list))
lattice_bonds.set_clim(vmin/np.amax(bc_list), vmax/np.amax(bc_list))
ax.add_collection(lattice_bonds)
image = lattice_bonds
if lattice:
if units == 'angstrom':
x, y = geom.xyz[atoms, xaxis], geom.xyz[atoms, yaxis]
if units == 'nm':
x, y = .1*geom.xyz[atoms, xaxis], .1*geom.xyz[atoms, yaxis]
ax.scatter(x, y, s=ps*2, marker='o', facecolors='None', linewidth=0.8, edgecolors='k')
if spsite is not None:
if units == 'angstrom':
xs, ys = geom.xyz[spsite, xaxis], geom.xyz[spsite, yaxis]
if units == 'nm':
xs, ys = .1*geom.xyz[spsite, xaxis], .1*geom.xyz[spsite, yaxis]
ax.scatter(xs, ys, s=ps*2, marker='x', color='red')
ax.autoscale()
ax.margins(0.)
#ax.margins(0.05)
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.xlabel('x ({})'.format(unitstr))
plt.ylabel('y ({})'.format(unitstr))
plt.gcf()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
if avg:
axcb = plt.colorbar(image, cax=cax, format='%f', ticks=[vmin, vmax])
if vmin == 0.:
axcb.ax.set_yticklabels(['0', '$\geq$ {:.3e}'.format(vmax)])
else:
axcb.ax.set_yticklabels(['$\leq$ {:.3e}'.format(vmin), '$\geq$ {:.3e}'.format(vmax)])
print('MIN bc among selected atoms (in final plot) = {}'.format(vmin))
print('MAX bc among selected atoms (in final plot) = {}'.format(vmax))
else:
axcb = plt.colorbar(image, cax=cax, format='%f', ticks=[vmin/np.amax(bc_list), vmax/np.amax(bc_list)])
        if scale == '%':
            # NOTE: normalize to the maximum bond current of the plotted data
            # (the original referenced max_newbc_bg, which is not defined in this routine)
            vmin, vmax = vmin*100/np.amax(bc_list), vmax*100/np.amax(bc_list)
axcb.ax.set_yticklabels(['{:.1f} %'.format(vmin), '{:.1f} %'.format(vmax)])
print('MIN bc among selected atoms (in final plot) = {:.1f} %'.format(vmin))
print('MAX bc among selected atoms (in final plot) = {:.1f} %'.format(vmax))
else:
axcb.ax.set_yticklabels(['{:.3e}'.format(vmin), '{:.3e}'.format(vmax)])
print('MIN bc among selected atoms (in final plot) = {}'.format(vmin))
print('MAX bc among selected atoms (in final plot) = {}'.format(vmax))
plt.savefig(figname, bbox_inches='tight', transparent=True, dpi=dpi)
print('Successfully plotted to "{}"'.format(figname))
print('Done in {} sec'.format(time.time() - t))
return bc_list, vmin, vmax, i_list, j_list
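# Usage sketch (file name is an assumption): plot the k-averaged bond currents injected
# from electrode 0 at E = 0 eV as an atom-averaged 2D map in nm, with the lattice overlaid.
def _example_plot_bondcurrents():
    return plot_bondcurrents('siesta.TBT.nc', idx_elec=0, only='+', E=0.0, k='avg',
                             avg=True, units='nm', lattice=True, out='bc_map')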
def plot_bondcurrents_old(f, idx_elec, sum='+', E=0.0, k='avg', f_bg=None, percent_bg=False,
vmin=None, vmax=None, lw=5, log=False, adosmap=False, ADOSmin=None, ADOSmax=None, arrows=False,
lattice=False, ps=20, ados=False, atoms=None, out=None, ymin=None, ymax=None, dpi=180):
"""
atoms must be 0-based
"""
t = time.time()
print('\n***** BOND-CURRENTS (2D map) *****\n')
nc = si.get_sile(f)
elec = nc.elecs[idx_elec]
# Read bond currents from TBT.nc file
bc, energy, geom = read_bondcurrents(f, idx_elec, sum, E, k)
# Read and subtract extra bc, if necessary
if f_bg:
#geom must be the same!!!
print('\n - Subtracting bondcurrents from {}'.format(f_bg))
bc_bg = read_bondcurrents(f_bg, idx_elec, sum, E, k)[0]
if percent_bg:
# If needed, select only selected atoms from bc_bg.
# Then get max bc value to be used later
if atoms is None:
newbc_bg = bc_bg.tocoo().data
else:
if atoms[0] < 0:
# if atoms is a list of negative numbers, use all atoms except them
atoms = list(set(nc.a_dev).difference(set(-np.asarray(atoms))))
newbc_bg = bc_sub(bc_bg, atoms)[2]
max_newbc_bg = np.amax(newbc_bg)
bc -= bc_bg
bc.eliminate_zeros()
# If needed, select only selected atoms from bc_bg.
if atoms is None:
print('Reading bond-currents among all atoms in device region')
atoms = nc.a_dev
i_list, j_list, bc_list = bc.tocoo().row, bc.tocoo().col, bc.tocoo().data
else:
if atoms[0] < 0:
# if atoms is a list of negative numbers, use all atoms except them
atoms = list(set(nc.a_dev).difference(set(-np.asarray(atoms))))
i_list, j_list, bc_list = bc_sub(bc, atoms)
print('Number of bond-current entries: {}'.format(np.shape(bc_list)))
print('MIN bc among selected atoms (from file) = {}'.format(np.min(bc_list)))
print('MAX bc among selected atoms (from file) = {}'.format(np.max(bc_list)))
#print('i\tj\tBond-current')
#for i, j, bc in zip(i_list, j_list, bc_list):
# print('{}\t{}\t{}'.format(i, j, bc))
# Plot
import matplotlib.collections as collections
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
cmap = cm.viridis
if out is None:
figname = 'BondCurrents_{}_E{}.png'.format(elec, energy)
else:
figname = '{}_{}_E{}.png'.format(out, elec, energy)
fig, ax = plt.subplots()
ax.set_aspect('equal')
# Plot bond currents as half segments starting from the atoms
start_list = zip(geom.xyz[i_list, 0], geom.xyz[i_list, 1])
half_end_list = zip(.5*(geom.xyz[i_list, 0]+geom.xyz[j_list, 0]),
.5*(geom.xyz[i_list, 1]+geom.xyz[j_list, 1]))
line_list = list(map(list, zip(start_list, half_end_list))) # segments length = 1/2 bonds length
#end_list = zip(geom.xyz[j_list, 0], geom.xyz[j_list, 1])
#line_list = list(map(list, zip(start_list, end_list))) # segments length = bonds length
if log:
bc_list = np.log(bc_list+1)
norm = LogNorm()
else:
norm=None
if ados:
# Plot ADOS
ADOS = read_ADOS(f, idx_elec, E, k, atoms)[2]
x, y = geom.xyz[atoms, 0], geom.xyz[atoms, 1]
if ADOSmin is None:
ADOSmin = np.min(ADOS)
if ADOSmax is None:
ADOSmax = np.max(ADOS)
if adosmap:
coords = np.column_stack((x, y))
values = np.array(ADOS)
img, min, max = mask_interpolate(coords, values, oversampling=15)
# Note that we tell imshow to show the array created by mask_interpolate
# faithfully and not to interpolate by itself another time.
image = ax.imshow(img.T, extent=(min[0], max[0], min[1], max[1]),
origin='lower', interpolation='none', cmap='viridis',
vmin=ADOSmin, vmax=ADOSmax)
else:
colors = ADOS
area = 300 # * ADOS / np.max(ADOS)
image = ax.scatter(x, y, c=colors, s=area, marker='o', edgecolors='None',
cmap=cmap, norm=norm)
image.set_clim(ADOSmin, ADOSmax)
image.set_array(ADOS)
# Plot bond-currents
if arrows: # NOT WORKING
lattice_bonds = ax.quiver(np.array(start_list[0]), np.array(start_list[1]),
np.subtract(np.array(half_end_list[0]), | np.array(start_list[0]) | numpy.array |
"""reproduces discrete spi inversion results from LfL paper (sect.6.2)."""
from __future__ import print_function
from grid import Grid
from mdp_utils import sample_trajectory
from mdp_utils import score_policy
from mdp_utils import softmax
from mdp_utils import solve_entropy_regularized_mdp
import numpy as np
import torch
from torch.distributions import Categorical
import torch.nn as nn
def main():
# set hyperparameters
gride_size = 5
n_states = gride_size**2
n_actions = 4
gamma = 0.96
alpha = 0.3
alpha_model = 0.7
entropy_coef = 0.01
n_epoch = 10
kmax = 3
tmax = 1000
n_run = 100
# generate a deterministic gridworld:
g = Grid(gride_size, stochastic=False)
# we just need the reward and dynamic of the MDP:
r, p = g.make_tables()
# solve entropy-regularized MDP:
_, j_pi_star = solve_entropy_regularized_mdp(r, p, alpha, gamma)
print('optimal score =', j_pi_star)
learner_score = []
learner_regret = []
observer_score = []
observer_regret = []
for run in range(n_run):
print('run', run)
print('---------')
np.random.seed(run)
torch.manual_seed(run)
# init first policy
pi = np.ones((n_states, n_actions))/n_actions
# sample initial trajectory:
trajectory = sample_trajectory(p, pi, tmax)
# transition estimation:
p_ = np.ones((n_states, n_actions, n_states)) * 1e-15
count = np.ones((n_states, n_actions, n_states)) * n_states * 1e-15
for (s, a), (s_, _) in zip(trajectory[:-1], trajectory[1:]):
p_[s, a, s_] += 1
count[s, a, :] += 1
p_ /= count
demos = [trajectory]
policies = [pi]
# policy iterations
for k in range(kmax):
print('learner step', k)
q = np.random.rand(n_states, n_actions)
for _ in range(1000):
v = np.zeros(n_states)
for state in range(n_states):
for action_ in range(n_actions):
v[state] += pi[state, action_] * \
(q[state, action_] - alpha * np.log(pi[state, action_]))
q *= 0
for state in range(n_states):
for action in range(n_actions):
q[state, action] = r[state, action]
for state_ in range(n_states):
q[state, action] += gamma*p[state, action, state_]*v[state_]
pi = np.zeros((n_states, n_actions))
for state in range(n_states):
pi[state, :] = softmax(q[state, :]/alpha)
# sample trajectory with new policy:
trajectory = sample_trajectory(p, pi, tmax)
policies.append(pi)
demos.append(trajectory)
# learner score
j_pi_learner = score_policy(pi, r, p, alpha, gamma)
print('learner score ', j_pi_learner, j_pi_star - j_pi_learner)
learner_score.append(j_pi_learner)
learner_regret.append(j_pi_star -j_pi_learner)
# estimate learner policies
torch_p = torch.from_numpy(p_).float()
logpi_ = tuple(nn.Parameter(torch.rand(n_states, n_actions, \
requires_grad=True)) \
for _ in range(kmax+1))
optimizer_pi = torch.optim.Adam(logpi_, lr=5e-1)
for epoch in range(n_epoch):
loss_pi = 0
for k, demo in enumerate(demos):
demo_sas = [(s, a, s_) for (s, a), (s_, _) in zip(demo[:-1], demo[1:])]
for s, a, s_ in demo_sas:
dist = Categorical(torch.exp(logpi_[k][s, :]))
log_prob_demo = torch.log(dist.probs[a])
loss_pi -= (log_prob_demo + entropy_coef * dist.entropy())
optimizer_pi.zero_grad()
loss_pi.backward()
optimizer_pi.step()
if epoch%1 == 0:
print('policy estimation epoch', epoch, 'loss_pi', loss_pi.item())
# create target reward functions:
targets = []
for k, demo in enumerate(demos[:-1]):
dist_2 = torch.exp(logpi_[k+1]) \
/ torch.exp(logpi_[k+1]).sum(1, keepdim=True)
dist_1 = torch.exp(logpi_[k]) / torch.exp(logpi_[k]).sum(1, keepdim=True)
kl = torch.log(dist_2) - torch.log(dist_1)
r_shape = torch.zeros(n_states, n_actions)
for state in range(n_states):
for action in range(n_actions):
r_shape[state, action] = alpha_model \
* torch.log(dist_2[state, action])
for state_ in range(n_states):
for action_ in range(n_actions):
r_shape[state, action] -= alpha_model * gamma \
* (kl[state_, action_]) * torch_p[state, action, state_] \
* dist_1[state_, action_]
targets.append(r_shape)
# recover state-action reward and shaping
r_ = nn.Parameter(torch.zeros(n_states, n_actions, requires_grad=True))
r_sh = (r_,) + tuple(nn.Parameter(torch.zeros(n_states, requires_grad=True))\
for _ in range(kmax))
optimizer = torch.optim.Adam(r_sh, lr=1)
for epoch in range(200):
loss = 0
for k, target in enumerate(targets):
loss += \
((r_sh[0]+ r_sh[k+1].repeat(n_actions, 1).t() - gamma * \
torch.sum(torch_p * r_sh[k+1].repeat(n_states, n_actions, 1), 2)\
- target.detach())**2).sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()
r_ = r_.detach().numpy()
# solve with r_:
pi_observer, _ = solve_entropy_regularized_mdp(r_, p, alpha, gamma)
# observer score with true reward:
j_pi_observer = score_policy(pi_observer, r, p, alpha, gamma)
print('observer score ', j_pi_observer, j_pi_star - j_pi_observer)
observer_score.append(j_pi_observer)
observer_regret.append(j_pi_star - j_pi_observer)
print('learner_score', np.mean(learner_score),
np.sqrt(np.var(learner_score)))
print('learner_regret', np.mean(learner_regret),
np.sqrt(np.var(learner_regret)))
print('observer_score', np.mean(observer_score),
np.sqrt(np.var(observer_score)))
print('observer_regret', | np.mean(observer_regret) | numpy.mean |
import numpy as np
import matplotlib.pyplot as plt
import hrgames as hrg
import sys
def classic_pd(R=None, tf=None, xini=None):
"""Plays a classic prisoner's dilemma between two players"""
# Two connected players
A = np.zeros([2, 2])
A[0, 1] = 1
A[1, 0] = 1
# Classic positive prisoner's dilemma
B = np.zeros([2, 2])
B[0, 0] = 3 # R
B[0, 1] = 1 # S
B[1, 0] = 4 # T
B[1, 1] = 2 # P
# Relationship matrix
if R is None or R.shape != (2, 2):
R = np.zeros([2, 2], dtype='double')
R[0, 0] = 2/3 + 0.05
R[0, 1] = 1/3 - 0.05
R[1, 1] = 2/3 - 0.05
R[1, 0] = 1/3 + 0.05
# Initial Condition, 0.5 in all strategies for all players
    if xini is None or xini.shape != (2, 2):
xini = np.divide(np.ones([2, 2], dtype='double'), 2)
# Time interval and number of steps
t0 = 0
if tf is None:
tf = 150
n = (tf-t0)*10
h = (tf-t0)/n
x = hrg.hr_game(t0, tf, n, A, B, R, xini)
# Plot results
xaxis = np.arange(t0, tf+h, h)
plt.plot(xaxis, x[0, 0, :], 'r', label='Player 1')
plt.plot(xaxis, x[1, 0, :], 'b', label='Player 2', alpha=0.7)
plt.ylim([-0.05, 1.05])
plt.legend(['Player 1', 'Player 2'])
plt.title("Cooperation probability")
plt.show()
plt.close()
return None
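# Usage sketch: run the game with a custom 2x2 relationship matrix (rows summing to 1, as
# in the defaults) and a longer time horizon; the values below are only illustrative.
def _example_classic_pd():
    R = np.array([[0.8, 0.2],
                  [0.4, 0.6]], dtype='double')
    classic_pd(R=R, tf=300)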
def classic_pd_negative(R=None, tf=None, xini=None):
"""Plays a classic prisoner's dilemma between two players"""
# Two connected players
A = np.zeros([2, 2])
A[0, 1] = 1
A[1, 0] = 1
# Classic negative prisoner's dilemma
B = np.zeros([2, 2])
B[0, 0] = -2 # R
B[0, 1] = -7 # S
B[1, 0] = 0 # T
B[1, 1] = -5 # P
# Relationship matrix
if R is None or R.shape != (2, 2):
R = np.zeros([2, 2], dtype='double')
R[0, 0] = 5/7 - 0.05
R[0, 1] = 2/7 + 0.05
R[1, 1] = 5/7 + 0.05
R[1, 0] = 2/7 - 0.05
# Initial Condition, 0.5 in all strategies for all players
if xini is None or xini.shape != (2, 2):
xini = np.divide(np.ones([2, 2], dtype='double'), 2)
# Time interval and number of steps
t0 = 0
if tf is None:
tf = 150
n = (tf-t0)*10
h = (tf-t0)/n
x = hrg.hr_game(t0, tf, n, A, B, R, xini)
# Plot results
xaxis = | np.arange(t0, tf+h, h) | numpy.arange |
#!/usr/bin/env python3
from progress.bar import Bar, ChargingBar
import os, time, random
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import normal, uniform
from numpy import pi
from step import simulador
C = 10 ** -3
def random_position(Ym, perturbations, Ysd=None, normal_=True, dist=None):
'''
    Generates an array of normal random values where element i has
    mean Ym[i] and standard deviation Ysd[i] (or uniform values in
    [Ym - dist, Ym + dist] when normal_ is False).
    param Ym: array of means
    param Ysd: array of standard deviations
    param perturbations: list with the positions to be perturbed
    returns: array of random values (zero at non-perturbed positions)
'''
per = perturbations
if normal_:
g = lambda x, y: normal(x, y)
Y = [g(x, y) if p == 1 else 0 for x, y, p in zip(Ym, Ysd, per)]
else:
a = Ym - dist
b = Ym + dist
g = lambda x, y: | uniform(x,y) | numpy.random.uniform |
# -*- coding: utf-8 -*-
"""
@created on: 9/21/19,
@author: <NAME>,
@version: v0.0.1
@system name: badgod
Description:
..todo::
"""
import numpy as np
import random
from collections import defaultdict
"""
Monte-Carlo
In this problem, we will implememnt an AI player for Blackjack.
The main goal of this problem is to get familar with Monte-Carlo algorithm.
We can test the correctness of our code
by typing 'nosetests -v mc_test.py' in the terminal.
"""
def initial_policy(observation):
"""A policy that sticks if the player score is >= 20 and his otherwise
Parameters:
-----------
observation:
Returns:
--------
action: 0 or 1
0: STICK
1: HIT
"""
return 0 if observation[0] >= 20 else 1
def play_step(env, action_to_take):
"""
Given the action to be taken, plays a step in the environment and returns the new set of values
:param env: function
OpenAI gym environment.
:param action_to_take: int
Action index to be taken for the current step.
:return: next_state: 3-tuple
(Player's sum, Dealer's sum, Boolean indicating if the player has ACE).
:return: reward: int
Reward received for choosing the given action
:return: done: boolean
Boolean indicating if the state is a terminal or not.
"""
next_state, reward, done, info = env.step(action_to_take)
return next_state, reward, done
def get_random_episode(env, policy):
"""
Generates a list having episodes. Each episode in this list is generated until a terminal state is reached.
:param env: function
OpenAI gym environment.
:param policy: function
The policy to be followed while choosing an action.
:return: list
List of generated episodes
"""
new_set_of_episodes = []
current_state = env.reset()
while True:
action_to_take = policy(current_state)
next_state, reward, done = play_step(env, action_to_take)
new_set_of_episodes.append((current_state, action_to_take, reward))
if done:
break
current_state = next_state
return new_set_of_episodes
def mc_prediction(policy, env, n_episodes, gamma=1.0):
"""
Given policy using sampling to calculate the value function
by using Monte Carlo first visit algorithm.
:param policy: function
A function that maps an observation to action probabilities
:param env: function
OpenAI gym environment
:param n_episodes: int
Number of episodes to sample
:param gamma: float
Gamma discount factor
:return V: defaultdict(float)
A dictionary that maps from state to value
"""
# initialize empty dictionaries
returns_sum = defaultdict(float)
returns_count = defaultdict(float)
# a nested dictionary that maps state -> value
V = defaultdict(float)
for i in range(n_episodes):
new_set_of_episodes = get_random_episode(env, policy)
states_set = set([episode[0] for episode in new_set_of_episodes])
        for state in states_set:
            first_occurrence = next(idx for idx, x in enumerate(new_set_of_episodes) if x[0] == state)
            # NOTE: use gamma directly; int(gamma) would truncate any discount factor < 1 to zero
            total_reward = sum([(gamma ** power) * episode[2] for power, episode in
                                enumerate(new_set_of_episodes[first_occurrence:])])
            returns_sum[state] += total_reward
            returns_count[state] += 1
            V[state] = returns_sum[state] / returns_count[state]
return V
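# Usage sketch (assumes OpenAI gym's Blackjack-v0 environment is available): estimate the
# state-value function of the fixed stick-on-20 policy with first-visit Monte Carlo.
def _example_mc_prediction():
    import gym
    env = gym.make('Blackjack-v0')
    return mc_prediction(initial_policy, env, n_episodes=5000, gamma=1.0)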
def epsilon_greedy(Q, state, nA, epsilon=0.1):
"""
A epsilon-greedy method to generate random action based on Q state
:param Q: dict()
A dictionary that maps from state -> action-values,
where Q[s][a] is the estimated action value corresponding to state s and action a.
:param state: int
current state
:param nA: int
Number of actions in the environment
:param epsilon: float
The probability to select a random action, range between 0 and 1
:return: int
action based current state
Hints:
------
With probability (1 − epsilon) choose the greedy action.
With probability epsilon choose an action at random.
"""
# A = np.ones(nA) * epsilon / float(nA)
# best_action, prob_for_best_action = np.argmax(Q[state]), max(Q[state])
# if prob_for_best_action > epsilon:
# A[best_action] += (1.0 - epsilon)
# return np.random.choice(np.arange(len(A)), p=A)
# else:
# return np.random.choice(np.arange(len(A)))
actions = np.ones(nA) * epsilon / float(nA)
best_current_action = np.argmax(Q[state])
actions[best_current_action] += (1.0 - epsilon)
return np.random.choice(np.arange(len(actions)), p=actions)
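# A small sanity-check sketch of epsilon_greedy using a hand-built Q table;
# the numbers are illustrative assumptions only.
def _demo_epsilon_greedy():
    Q = {0: np.array([0.1, 0.9])}  # in state 0, action 1 looks best
    counts = np.zeros(2)
    for _ in range(1000):
        counts[epsilon_greedy(Q, 0, nA=2, epsilon=0.1)] += 1
    # The greedy action should be picked ~95% of the time
    # (1 - epsilon + epsilon/nA), the other action ~5% (epsilon/nA).
    print(counts / counts.sum())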
def generate_random_episode_greedy(Q, nA, epsilon, env):
"""
Generate one episode using the epsilon-greedy action selection method.
:param Q: dict()
A dictionary that maps from state -> action-values,
where Q[s][a] is the estimated action value corresponding to state s and action a.
:param nA: int
Number of actions in the environment
:param epsilon: float
The probability to select a random action, range between 0 and 1
:param env: function
OpenAI gym environment
:return: list
List of (state, action, reward) tuples for the generated episode
"""
new_set_of_episodes = []
current_state = env.reset()
while True:
action_to_take = epsilon_greedy(Q, current_state, nA, epsilon)
next_state, reward, done = play_step(env, action_to_take)
new_set_of_episodes.append((current_state, action_to_take, reward))
if done:
break
current_state = next_state
return new_set_of_episodes
def mc_control_epsilon_greedy(env, n_episodes, gamma=1.0, epsilon=0.1):
"""
Monte Carlo control using an epsilon-greedy behaviour policy.
Finds an approximately optimal epsilon-greedy policy.
:param env: function
OpenAI gym environment
:param n_episodes: int
Number of episodes to sample
:param gamma: float
Gamma discount factor
:param epsilon: float
The probability to select a random action, range between 0 and 1
:return: Q: dict()
A dictionary that maps from state -> action-values,
where Q[s][a] is the estimated action value corresponding to state s and action a.
Hint:
-----
You could consider decaying epsilon, i.e. epsilon = epsilon - (0.1 / n_episodes) after each episode,
keeping epsilon > 0.
"""
returns_sum = defaultdict(float)
returns_count = defaultdict(float)
# a nested dictionary that maps state -> (action -> action-value)
Q = defaultdict(lambda: np.zeros(env.action_space.n))
import pathlib
import pickle
import time
from collections import defaultdict
import torch
import numpy as np
from skimage import io as imgio
from libs.ops import box_np_ops
from libs import preprocess as prep
from libs.geometry import points_in_convex_polygon_3d_jit
from libs.ops.point_cloud.bev_ops import points_to_bev
from data import kitti_common as kitti
def merge_second_batch(batch_list, _unused=False):
example_merged = defaultdict(list)
for example in batch_list:
for k, v in example.items():
example_merged[k].append(v)
ret = {}
example_merged.pop("num_voxels")
for key, elems in example_merged.items():
if key in [
'voxels', 'num_points', 'num_gt', 'gt_boxes', 'voxel_labels',
'match_indices'
]:
ret[key] = np.concatenate(elems, axis=0)
elif key == 'match_indices_num':
ret[key] = np.concatenate(elems, axis=0)
elif key == 'coordinates':
coors = []
for i, coor in enumerate(elems):
coor_pad = np.pad(
coor, ((0, 0), (1, 0)),
mode='constant',
constant_values=i)
coors.append(coor_pad)
ret[key] = np.concatenate(coors, axis=0)
else:
ret[key] = np.stack(elems, axis=0)
return ret
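# A small sketch of the coordinate handling above: each sample's voxel
# coordinates receive a leading batch-index column before concatenation.
# The arrays are tiny illustrative assumptions.
def _demo_coordinate_padding():
    coords_per_sample = [np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]])]
    padded = []
    for i, coor in enumerate(coords_per_sample):
        padded.append(np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i))
    merged = np.concatenate(padded, axis=0)
    print(merged)  # first column is the batch index: 0, 0, 1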
def prep_pointcloud(input_dict,
root_path,
voxel_generator,
target_assigner,
db_sampler=None,
max_voxels=20000,
class_names=['Car'],
remove_outside_points=False,
training=True,
create_targets=True,
shuffle_points=False,
reduce_valid_area=False,
remove_unknown=False,
gt_rotation_noise=[-np.pi / 3, np.pi / 3],
gt_loc_noise_std=[1.0, 1.0, 1.0],
global_rotation_noise=[-np.pi / 4, np.pi / 4],
global_scaling_noise=[0.95, 1.05],
global_loc_noise_std=(0.2, 0.2, 0.2),
global_random_rot_range=[0.78, 2.35],
generate_bev=False,
without_reflectivity=False,
num_point_features=4,
anchor_area_threshold=1,
gt_points_drop=0.0,
gt_drop_max_keep=10,
remove_points_after_sample=True,
anchor_cache=None,
remove_environment=False,
random_crop=False,
reference_detections=None,
add_rgb_to_points=False,
lidar_input=False,
unlabeled_db_sampler=None,
out_size_factor=2,
min_gt_point_dict=None,
bev_only=False,
use_group_id=False,
out_dtype=np.float32):
"""convert point cloud to voxels, create targets if ground truths
exists.
"""
points = input_dict["points"]
if training:
gt_boxes = input_dict["gt_boxes"]
gt_names = input_dict["gt_names"]
difficulty = input_dict["difficulty"]
group_ids = None
if use_group_id and "group_ids" in input_dict:
group_ids = input_dict["group_ids"]
rect = input_dict["rect"]
Trv2c = input_dict["Trv2c"]
P2 = input_dict["P2"]
unlabeled_training = unlabeled_db_sampler is not None
image_idx = input_dict["image_idx"]
if reference_detections is not None:
C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
frustums = box_np_ops.get_frustum_v2(reference_detections, C)
frustums -= T
# frustums = np.linalg.inv(R) @ frustums.T
frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
masks = points_in_convex_polygon_3d_jit(points, surfaces)
points = points[masks.any(-1)]
if remove_outside_points and not lidar_input:
image_shape = input_dict["image_shape"]
points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2,
image_shape)
if remove_environment is True and training:
selected = kitti.keep_arrays_by_name(gt_names, class_names)
gt_boxes = gt_boxes[selected]
gt_names = gt_names[selected]
difficulty = difficulty[selected]
if group_ids is not None:
group_ids = group_ids[selected]
points = prep.remove_points_outside_boxes(points, gt_boxes)
if training:
# print(gt_names)
selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"])
gt_boxes = gt_boxes[selected]
gt_names = gt_names[selected]
difficulty = difficulty[selected]
if group_ids is not None:
group_ids = group_ids[selected]
gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c)
if remove_unknown:
remove_mask = difficulty == -1
"""
gt_boxes_remove = gt_boxes[remove_mask]
gt_boxes_remove[:, 3:6] += 0.25
points = prep.remove_points_in_boxes(points, gt_boxes_remove)
"""
keep_mask = np.logical_not(remove_mask)
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
# For type hinting
from typing import Tuple
from numpy import array as np_arr
from pandas import DataFrame as pandasDF
# Project imports
from nn import strat_configs, model, utils
def main(config):
'''
Perform the out-of-sample testing for the nn. In this testing the nn acts
as an advisor, meanining that it predicts if the strategy buy signal is
true or false.
Parameters
----------
config : dict
Config params for the out of sample test.
Returns
-------
None
'''
# Adjust the config file with the saved settings from the data generation
config = utils.add_gen_config(config)
# Get the strategy config for what the nn was trained on
strat_config = strat_configs.get_config(config['strat name'])
# Get the testing data
df_test, data, true_labels = get_testing_data(config,
strat_config,
)
# Load in the nn model for the predictions
nn_model = load_model('nn/models/' + config['model save name'])
# Make the predictions and find the class labels
predict = nn_model.predict(data.astype('float32'))
labels = np.argmax(predict, axis = 1)
# Make a new df to signify the return
df_test['predicted'] = labels
df_test['surety'] = predict[np.arange(0, labels.shape[0]), labels]
# Save the testing to a csv file so that future MC trials can be performed
df_test.to_csv('nn/data/' + config['strat name'] + ' out sample test.csv',
index = False)
# Plot the confusion matrix
utils.get_confusion_matrix(true_labels,
labels,
config,
'Out of sample test'
)
return get_stats(df_test, config)
def get_testing_data(config: dict,
strat_config: dict) -> Tuple[pandasDF, np_arr, np_arr]:
'''
Get the input numpy array for the testing data
Parameters
----------
config : dict
Config params for the out of sample test.
strat_config : dict
Config params for the strategy the nn was trained on
Returns
-------
df : pandasDF
The testing dataframe, filtered to only the ticker/profit/label columns
data : np_arr
The data to feed into the NN
labels: np_arr
The true labels for the confusion matrix
'''
# Load in the dataframe with the tickers to test with
df = pd.read_csv('nn/data/' + config['strat name'] + ' testing.csv')
# Reset the index of the dataframe to remove the set with copy warning
df = df.reset_index().drop(columns = ['index', 'ticker', 'Date'])
# Change the df to values, and reshape if an LSTM-type network has been used
data = df.values[:, :-2]
if config['model type'] in ['lstm', 'bidirectional']:
data = model.reshape_rnn(data,
max(config['time lags']),
)
return df[['Profit/Loss', 'labels']], data, df['labels'].values
def get_stats(df: pandasDF,
config: dict):
'''
Get the statistics of how the strategy performs with and without the NN
Parameters
----------
df : pandasDF
A dataframe of trades made for this strategy
config : dict
Config params for the out of sample test
Returns
-------
None
'''
# First, deduce how the strategy performed without the NN to assist
print('\nPerformance without the NN')
print_stats(df['Profit/Loss'].values)
# Filter the dataframe to only consider the minimum class and surety level
df = df[(df['predicted'] >= config['min class'])
& (df['surety'] > config['surety'])]
# Now determine how the strategy performed with the NN
print('\nPerformance with the NN')
print_stats(df['Profit/Loss'].values)
return
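# A tiny illustration of the class/surety filter applied in get_stats; the
# frame and thresholds below are made-up values for demonstration only.
def _demo_surety_filter():
    df = pd.DataFrame({'Profit/Loss': [1.2, -0.8, 0.5],
                       'predicted': [1, 0, 1],
                       'surety': [0.9, 0.95, 0.6]})
    filtered = df[(df['predicted'] >= 1) & (df['surety'] > 0.7)]
    print(filtered)  # only the first trade survives both conditions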
def print_stats(percs: np_arr):
'''
Given an array of profit/loss percentages, print the statistics.
Parameters
----------
percs : np_arr
The profit/loss from each trade
Returns
-------
None
'''
if percs.shape[0] > 0:
print('Number of trades: ', percs.shape[0])
print('Win rate: ', percs[percs > 0].shape[0]/percs.shape[0])
print('Mean profit: ', np.mean(percs))
from __future__ import absolute_import, division
import sys
import argparse
import numpy as np
from numpy.linalg.linalg import LinAlgError
import astropy.io.fits as pyfits
from numpy.polynomial.legendre import legval,legfit
from scipy.signal import fftconvolve
import specter.psf
from lvmspec.io import read_image
from lvmutil.log import get_logger
from lvmspec.linalg import cholesky_solve,cholesky_solve_and_invert
from lvmspec.interpolation import resample_flux
def read_psf_and_traces(psf_filename) :
"""
Reads PSF and traces in PSF fits file
Args:
psf_filename : Path to input fits file which has to contain XTRACE and YTRACE HDUs
Returns:
psf : specter PSF object
xtrace : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ytrace : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
"""
log=get_logger()
psf=None
xtrace=None
ytrace=None
wavemin=None
wavemax=None
wavemin2=None
wavemax2=None
fits_file = pyfits.open(psf_filename)
try :
psftype=fits_file[0].header["PSFTYPE"]
except KeyError :
psftype=""
if psftype=="GAUSS-HERMITE" :
psf = specter.psf.GaussHermitePSF(psf_filename)
elif psftype=="SPOTGRID" :
psf = specter.psf.SpotGridPSF(psf_filename)
# now read trace coefficients
log.info("psf is a '%s'"%psftype)
if psftype == "bootcalib" :
wavemin = fits_file[0].header["WAVEMIN"]
wavemax = fits_file[0].header["WAVEMAX"]
xcoef = fits_file[0].data
ycoef = fits_file[1].data
wavemin2 = wavemin
wavemax2 = wavemax
elif "XTRACE" in fits_file :
xtrace=fits_file["XTRACE"].data
ytrace=fits_file["YTRACE"].data
wavemin=fits_file["XTRACE"].header["WAVEMIN"]
wavemax=fits_file["XTRACE"].header["WAVEMAX"]
wavemin2=fits_file["YTRACE"].header["WAVEMIN"]
wavemax2=fits_file["YTRACE"].header["WAVEMAX"]
elif psftype == "GAUSS-HERMITE" :
table=fits_file["PSF"].data
i=np.where(table["PARAM"]=="X")[0][0]
wavemin=table["WAVEMIN"][i]
wavemax=table["WAVEMAX"][i]
xtrace=table["COEFF"][i]
i=np.where(table["PARAM"]=="Y")[0][0]
ytrace=table["COEFF"][i]
wavemin2=table["WAVEMIN"][i]
wavemax2=table["WAVEMAX"][i]
if xtrace is None or ytrace is None :
raise ValueError("could not find XTRACE and YTRACE in psf file %s"%psf_filename)
if wavemin != wavemin2 :
raise ValueError("XTRACE and YTRACE don't have same WAVEMIN %f %f"%(wavemin,wavemin2))
if wavemax != wavemax2 :
raise ValueError("XTRACE and YTRACE don't have same WAVEMAX %f %f"%(wavemax,wavemax2))
if xtrace.shape[0] != ytrace.shape[0] :
raise ValueError("XTRACE and YTRACE don't have same number of fibers %d %d"%(xtrace.shape[0],ytrace.shape[0]))
fits_file.close()
return psf,xtrace,ytrace,wavemin,wavemax
def write_traces_in_psf(input_psf_filename,output_psf_filename,xcoef,ycoef,wavemin,wavemax) :
"""
Writes traces in a PSF.
Args:
input_psf_filename : Path to input fits file which has to contain XTRACE and YTRACE HDUs
output_psf_filename : Path to output fits file which has to contain XTRACE and YTRACE HDUs
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
"""
log = get_logger()
psf_fits=pyfits.open(input_psf_filename)
psftype=psf_fits[0].header["PSFTYPE"]
modified_x=False
modified_y=False
if psftype=="GAUSS-HERMITE" :
if "X" in psf_fits["PSF"].data["PARAM"] :
i=np.where(psf_fits["PSF"].data["PARAM"]=="X")[0][0]
ishape=psf_fits["PSF"].data["COEFF"][i].shape
if ishape != xcoef.shape :
log.warning("xcoef from file and from arg don't have same shape : %s != %s"%(str(ishape),str(xcoef.shape)))
n0=min(ishape[0],xcoef.shape[0])
n1=min(ishape[1],xcoef.shape[1])
psf_fits["PSF"].data["COEFF"][i] *= 0.
psf_fits["PSF"].data["COEFF"][i][:n0,:n1]=xcoef[:n0,:n1]
psf_fits["PSF"].data["WAVEMIN"][i]=wavemin
psf_fits["PSF"].data["WAVEMAX"][i]=wavemax
modified_x=True
if "Y" in psf_fits["PSF"].data["PARAM"] :
i=np.where(psf_fits["PSF"].data["PARAM"]=="Y")[0][0]
ishape=psf_fits["PSF"].data["COEFF"][i].shape
if ishape != ycoef.shape :
log.warning("xcoef from file and from arg don't have same shape : %s != %s"%(str(ishape),str(ycoef.shape)))
n0=min(psf_fits["PSF"].data["COEFF"][i].shape[0],ycoef.shape[0])
n1=min(psf_fits["PSF"].data["COEFF"][i].shape[1],ycoef.shape[1])
psf_fits["PSF"].data["COEFF"][i] *= 0.
psf_fits["PSF"].data["COEFF"][i][:n0,:n1]=ycoef[:n0,:n1]
psf_fits["PSF"].data["WAVEMIN"][i]=wavemin
psf_fits["PSF"].data["WAVEMAX"][i]=wavemax
modified_y=True
if "XTRACE" in psf_fits :
psf_fits["XTRACE"].data = xcoef
psf_fits["XTRACE"].header["WAVEMIN"] = wavemin
psf_fits["XTRACE"].header["WAVEMAX"] = wavemax
modified_x=True
if "YTRACE" in psf_fits :
psf_fits["YTRACE"].data = ycoef
psf_fits["YTRACE"].header["WAVEMIN"] = wavemin
psf_fits["YTRACE"].header["WAVEMAX"] = wavemax
modified_y=True
if not modified_x :
log.error("didn't change the X coefs in the psf: I/O error")
raise IOError("didn't change the X coefs in the psf")
if not modified_y :
log.error("didn't change the Y coefs in the psf: I/O error")
raise IOError("didn't change the Y coefs in the psf")
psf_fits.writeto(output_psf_filename,clobber=True)
log.info("wrote traces and psf in %s"%output_psf_filename)
def legx(wave,wavemin,wavemax) :
"""
Reduced coordinate (range [-1,1]) for calls to legval and legfit
Args:
wave : ND np.array
wavemin : float, min. val
wavemax : float, max. val
Returns:
array of same shape as wave
"""
return 2.*(wave-wavemin)/(wavemax-wavemin)-1.
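# A quick sketch of the reduced coordinate: wavemin maps to -1, wavemax to +1
# and the midpoint to 0. The wavelengths below are arbitrary examples.
def _demo_legx():
    wave = np.array([3600., 5400., 7200.])
    print(legx(wave, 3600., 7200.))  # -> [-1.  0.  1.]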
# beginning of routines for cross-correlation method for trace shifts
def boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=None, width=7) :
"""
Fast boxcar extraction of spectra from a preprocessed image and a trace set
Args:
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : extraction boxcar width, default is 7
Returns:
flux : 2D np.array of shape (nfibers,n0=image.shape[0]), sum of pixel values per row of length=width per fiber
ivar : 2D np.array of shape (nfibers,n0), ivar[f,j] = 1/( sum_[j,b:e] (1/image.ivar) ), ivar=0 if at least 1 pixel in the row has image.ivar=0 or image.mask!=0
wave : 2D np.array of shape (nfibers,n0), determined from the traces
"""
log=get_logger()
log.info("Starting boxcar extraction...")
if fibers is None :
fibers = np.arange(xcoef.shape[0])
log.info("wavelength range : [%f,%f]"%(wavemin,wavemax))
if image.mask is not None :
image.ivar *= (image.mask==0)
# Applying a mask that keeps positive value to get the Variance by inversing the inverse variance.
var=np.zeros(image.ivar.size)
ok=image.ivar.ravel()>0
var[ok] = 1./image.ivar.ravel()[ok]
var=var.reshape(image.ivar.shape)
badimage=(image.ivar==0)
n0 = image.pix.shape[0]
n1 = image.pix.shape[1]
frame_flux = np.zeros((fibers.size,n0))
frame_ivar = np.zeros((fibers.size,n0))
frame_wave = np.zeros((fibers.size,n0))
xx = np.tile(np.arange(n1),(n0,1))
hw = width//2
ncoef=ycoef.shape[1]
twave=np.linspace(wavemin, wavemax, ncoef+2)
for f,fiber in enumerate(fibers) :
log.info("extracting fiber #%03d"%fiber)
y_of_wave = legval(legx(twave, wavemin, wavemax), ycoef[fiber])
coef = legfit(legx(y_of_wave, 0, n0), twave, deg=ncoef) # add one deg
frame_wave[f] = legval(legx(np.arange(n0).astype(float), 0, n0), coef)
x_of_y = np.floor( legval(legx(frame_wave[f], wavemin, wavemax), xcoef[fiber]) + 0.5 ).astype(int)
mask=((xx.T>=x_of_y-hw)&(xx.T<=x_of_y+hw)).T
frame_flux[f]=image.pix[mask].reshape((n0,width)).sum(-1)
tvar=var[mask].reshape((n0,width)).sum(-1)
frame_ivar[f]=(tvar>0)/(tvar+(tvar==0))
bad=(badimage[mask].reshape((n0,width)).sum(-1))>0
frame_ivar[f,bad]=0.
return frame_flux, frame_ivar, frame_wave
def resample_boxcar_frame(frame_flux,frame_ivar,frame_wave,oversampling=2) :
"""
Resamples the spectra in a frame obtained with boxcar extraction to the same wavelength grid, with oversampling.
Uses resample_flux routine.
Args:
frame_flux : 2D np.array of shape (nfibers,nwave), sum of pixel values per row of length=width per fiber
frame_ivar : 2D np.array of shape (nfibers,nwave), ivar[f,j] = 1/( sum_[j,b:e] (1/image.ivar) ), ivar=0 if at least 1 pixel in the row has image.ivar=0 or image.mask!=0
frame_wave : 2D np.array of shape (nfibers,nwave), determined from the traces
Optional:
oversampling : int , oversampling factor , default is 2
Returns:
flux : 2D np.array of shape (nfibers,nwave*oversampling)
ivar : 2D np.array of shape (nfibers,nwave*oversampling)
frame_wave : 1D np.array of size (nwave*oversampling)
"""
log=get_logger()
log.info("resampling with oversampling")
nfibers=frame_flux.shape[0]
wave=frame_wave[nfibers//2]
dwave=np.median(np.gradient(frame_wave))/oversampling
wave=np.linspace(wave[0],wave[-1],int((wave[-1]-wave[0])/dwave))
nwave=wave.size
flux=np.zeros((nfibers,nwave))
ivar=np.zeros((nfibers,nwave))
for i in range(nfibers) :
log.info("resampling fiber #%03d"%i)
flux[i],ivar[i] = resample_flux(wave, frame_wave[i],frame_flux[i],frame_ivar[i])
return flux,ivar,wave
def compute_dy_from_spectral_cross_correlation(flux,wave,refflux,ivar=None,hw=3.,deg=2) :
"""
Measure y offsets from two spectra expected to be on the same wavelength grid.
refflux is the assumed well calibrated spectrum.
A relative flux calibration of the two spectra is done internally.
Args:
flux : 1D array of spectral flux as a function of wavelength
wave : 1D array of wavelength (in Angstrom)
refflux : 1D array of reference spectral flux
Optional:
ivar : 1D array of inverse variance of flux
hw : half width in Angstrom of the cross-correlation chi2 scan, default=3A corresponding approximately to 5 pixels for DESI
deg : degree of polynomial fit as a function of wavelength, only used to find and mask outliers
Returns:
delta : float, measured wavelength offset of flux relative to refflux (in Angstrom)
sigma : float, uncertainty on delta
"""
# absorb differences of calibration (fiberflat not yet applied)
x=(wave-wave[wave.size//2])/500.
kernel=np.exp(-x**2/2)
f1=fftconvolve(flux,kernel,mode='same')
f2=fftconvolve(refflux,kernel,mode='same')
scale=f1/f2
refflux *= scale
error_floor=0.01 #A
if ivar is None :
ivar=np.ones(flux.shape)
dwave=wave[1]-wave[0]
ihw=int(hw/dwave)+1
chi2=np.zeros((2*ihw+1))
ndata=np.sum(ivar[ihw:-ihw]>0)
for i in range(2*ihw+1) :
d=i-ihw
b=ihw+d
e=-ihw+d
if e==0 :
e=wave.size
chi2[i] = np.sum(ivar[ihw:-ihw]*(flux[ihw:-ihw]-refflux[b:e])**2)
i=np.argmin(chi2)
if i<2 or i>=chi2.size-2 :
# something went wrong
delta=0.
sigma=100.
else :
# refine minimum
hh=int(0.6/dwave)+1
b=i-hh
e=i+hh+1
if b<0 :
b=0
e=b+2*hh+1
if e>2*ihw+1 :
e=2*ihw+1
b=e-(2*hh+1)
x=dwave*(np.arange(b,e)-ihw)
c=np.polyfit(x,chi2[b:e],deg)
if c[0]>0 :
delta=-c[1]/(2.*c[0])
sigma=np.sqrt(1./c[0] + error_floor**2)
if ndata>1 :
chi2pdf=(c[0]*delta**2+c[1]*delta+c[2])/(ndata+1)
if chi2pdf>1 : sigma *= np.sqrt(chi2pdf)
else :
# something else went wrong
delta=0.
sigma=100.
'''
print("dw= %f +- %f"%(delta,sigma))
if np.abs(delta)>1. :
print("chi2/ndf=%f/%d=%f"%(chi2[i],(ndata-1),chi2[i]/(ndata-1)))
import matplotlib.pyplot as plt
x=dwave*(np.arange(chi2.size)-ihw)
plt.plot(x,chi2,"o-")
pol=np.poly1d(c)
xx=np.linspace(x[b],x[e-1],20)
plt.plot(xx,pol(xx))
plt.axvline(delta)
plt.axvline(delta-sigma)
plt.axvline(delta+sigma)
plt.show()
'''
return delta,sigma
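# An illustrative check of compute_dy_from_spectral_cross_correlation using a
# synthetic emission line shifted by a known amount; the grid, line width and
# shift are arbitrary assumptions.
def _demo_cross_correlation_shift():
    wave = np.linspace(4000., 4020., 201)  # 0.1 A sampling
    def line(center):
        return np.exp(-0.5 * ((wave - center) / 0.5) ** 2)
    refflux = line(4010.0)
    flux = line(4010.3)  # shifted by 0.3 A
    delta, sigma = compute_dy_from_spectral_cross_correlation(flux, wave, refflux, hw=3.)
    # delta should be close to the imposed 0.3 A shift in magnitude
    # (the sign follows the convention of the chi2 scan above).
    print("measured offset: %.2f +- %.2f A" % (delta, sigma))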
def compute_dy_from_spectral_cross_correlations_of_frame(flux, ivar, wave , xcoef, ycoef, wavemin, wavemax, reference_flux , n_wavelength_bins = 4) :
"""
Measures y offsets from a set of resampled spectra and a reference spectrum that are on the same wavelength grid.
reference_flux is the assumed well calibrated spectrum.
Calls compute_dy_from_spectral_cross_correlation per fiber
Args:
flux : 2D np.array of shape (nfibers,nwave)
ivar : 2D np.array of shape (nfibers,nwave) , inverse variance of flux
wave : 1D array of wavelength (in Angstrom) of size nwave
refflux : 1D array of reference spectral flux of size nwave
Optional:
n_wavelength_bins : number of bins along wavelength
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dy : 1D array of shifts along y coordinates on CCD
ey : 1D array of uncertainties on dy
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
x_for_dy=np.array([])
y_for_dy=np.array([])
dy=np.array([])
ey=np.array([])
fiber_for_dy=np.array([])
wave_for_dy=np.array([])
nfibers = flux.shape[0]
for fiber in range(nfibers) :
log.info("computing dy for fiber #%03d"%fiber)
for b in range(n_wavelength_bins) :
wmin=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*b
if b<n_wavelength_bins-1 :
wmax=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*(b+1)
else :
wmax=wave[-1]
ok=(wave>=wmin)&(wave<=wmax)
sw=np.sum(ivar[fiber,ok]*flux[fiber,ok]*(flux[fiber,ok]>0))
if sw<=0 :
continue
dwave,err = compute_dy_from_spectral_cross_correlation(flux[fiber,ok],wave[ok],reference_flux[ok],ivar=ivar[fiber,ok],hw=3.)
block_wave = np.sum(ivar[fiber,ok]*flux[fiber,ok]*(flux[fiber,ok]>0)*wave[ok])/sw
if err > 1 :
continue
rw = legx(block_wave,wavemin,wavemax)
tx = legval(rw,xcoef[fiber])
ty = legval(rw,ycoef[fiber])
eps=0.1
yp = legval(legx(block_wave+eps,wavemin,wavemax),ycoef[fiber])
dydw = (yp-ty)/eps
tdy = -dwave*dydw
tey = err*dydw
x_for_dy=np.append(x_for_dy,tx)
y_for_dy=np.append(y_for_dy,ty)
dy=np.append(dy,tdy)
ey=np.append(ey,tey)
fiber_for_dy=np.append(fiber_for_dy,fiber)
wave_for_dy=np.append(wave_for_dy,block_wave)
return x_for_dy,y_for_dy,dy,ey,fiber_for_dy,wave_for_dy
def compute_dy_using_boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers, width=7, degyy=2) :
"""
Measures y offsets (internal wavelength calibration) from a preprocessed image and a trace set using a cross-correlation of boxcar extracted spectra.
Uses boxcar_extraction , resample_boxcar_frame , compute_dy_from_spectral_cross_correlations_of_frame
Args:
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : int, extraction boxcar width, default is 7
degyy : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dy : 1D array of shifts along y coordinates on CCD
ey : 1D array of uncertainties on dy
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
# boxcar extraction
boxcar_flux, boxcar_ivar, boxcar_wave = boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=fibers, width=width)
# resampling on common finer wavelength grid
flux, ivar, wave = resample_boxcar_frame(boxcar_flux, boxcar_ivar, boxcar_wave, oversampling=4)
# median flux used as internal spectral reference
mflux=np.median(flux,axis=0)
# measure y shifts
return compute_dy_from_spectral_cross_correlations_of_frame(flux=flux, ivar=ivar, wave=wave, xcoef=xcoef, ycoef=ycoef, wavemin=wavemin, wavemax=wavemax, reference_flux = mflux , n_wavelength_bins = degyy+4)
def compute_dx_from_cross_dispersion_profiles(xcoef,ycoef,wavemin,wavemax, image, fibers=None, width=7,deg=2) :
"""
Measure x offsets from a preprocessed image and a trace set
Args:
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : extraction boxcar width, default is 7
deg : degree of polynomial fit as a function of y, only used to find and mask outliers
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dx : 1D array of shifts along x coordinates on CCD
ex : 1D array of uncertainties on dx
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
log.info("Starting compute_dx_from_cross_dispersion_profiles ...")
if fibers is None :
fibers = np.arange(xcoef.shape[0])
log.info("wavelength range : [%f,%f]"%(wavemin,wavemax))
if image.mask is not None :
image.ivar *= (image.mask==0)
error_floor = 0.04 # pixel
# Variance based on inverse variance's size
var = np.zeros(image.ivar.shape)
# Applying a mask that keeps positive value to get the Variance by inversing the inverse variance.
n0 = image.pix.shape[0]
n1 = image.pix.shape[1]
y = np.arange(n0)
xx = np.tile(np.arange(n1),(n0,1))
hw = width//2
ncoef=ycoef.shape[1]
twave=np.linspace(wavemin, wavemax, ncoef+2)
ox=np.array([])
oy=np.array([])
odx=np.array([])
oex=np.array([])
of=np.array([])
ol=np.array([])
for f,fiber in enumerate(fibers) :
log.info("computing dx for fiber #%03d"%fiber)
y_of_wave = legval(legx(twave, wavemin, wavemax), ycoef[fiber])
coef = legfit(legx(y_of_wave, 0, n0), twave, deg=ncoef) # add one deg
twave = legval(legx(np.arange(n0).astype(float), 0, n0), coef)
x_of_y = legval(legx(twave, wavemin, wavemax), xcoef[fiber])
x_of_y_int = np.floor(x_of_y+0.5).astype(int)
dx = (xx.T-x_of_y).T
mask=((xx.T>=x_of_y_int-hw)&(xx.T<=x_of_y_int+hw)).T
ok = ((image.ivar[mask]==0).reshape((n0,width)).sum(-1)==0)
swdx = (dx[mask] * image.pix[mask] ).reshape((n0,width)).sum(-1)
swdxvar = (dx[mask]**2/(image.ivar[mask]+0.1*(image.ivar[mask]==0) )).reshape((n0,width)).sum(-1)
sw = (image.pix[mask]).reshape((n0,width)).sum(-1)
swy = sw*y
swx = sw*x_of_y
swl = sw*twave
# rebin
rebin = 200
ok = ((ok[:(n0//rebin)*rebin].reshape(n0//rebin,rebin)==0).sum(-1)==0)
sw = sw[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swdx = swdx[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swdxvar = swdxvar[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swx = swx[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swy = swy[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swl = swl[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
'''
import matplotlib.pyplot as plt
i=np.where((sw>0.01)&(ok>0))[0]
plt.errorbar(swy[i]/sw[i],swdx[i]/sw[i],np.sqrt(swdxvar[i])/sw[i],fmt="o")
plt.show()
'''
sw[sw<0] = 0
fex = np.sqrt(swdxvar/(sw+(sw==0))**2 + error_floor**2) # error on dx, with an error floor
ok &= (fex>0)&(fex<10) # ok means no ivar=0 pixel
fex = fex[ok]
fdx = (swdx/(sw+(sw==0)))[ok]
fx = (swx/(sw+(sw==0)))[ok]
fy = (swy/(sw+(sw==0)))[ok]
fl = (swl/(sw+(sw==0)))[ok]
good_fiber=True
for loop in range(10) :
if fdx.size < deg+2 :
good_fiber=False
break
try :
c = np.polyfit(fy,fdx,deg,w=1/fex**2)
pol = np.poly1d(c)
chi2 = (fdx-pol(fy))**2/fex**2
mchi2 = np.median(chi2)
#log.info("mchi2=%f"%mchi2)
#if mchi2>1 :
# fex *= np.sqrt(mchi2)
ok = np.where(chi2<=25.*mchi2)[0]
nbad = fdx.size-ok.size
fex = fex[ok]
fdx = fdx[ok]
fx = fx[ok]
fy = fy[ok]
fl = fl[ok]
except LinAlgError :
good_fiber=False
break
if nbad==0 :
break
#print("removing %d bad measurements"%nbad)
# we return the original sample of offset values
if good_fiber :
ox = np.append(ox,fx)
oy = np.append(oy,fy)
odx = np.append(odx,fdx)
oex = np.append(oex,fex)
of = np.append(of,fiber*np.ones(fy.size))
ol = np.append(ol,fl)
return ox,oy,odx,oex,of,ol
def shift_ycoef_using_external_spectrum(psf,xcoef,ycoef,wavemin,wavemax,image,fibers,spectrum_filename,degyy=2,width=7) :
"""
Measure y offsets (external wavelength calibration) from a preprocessed image , a PSF + trace set using a cross-correlation of boxcar extracted spectra
and an external well-calibrated spectrum.
The PSF shape is used to convolve the input spectrum. It could also be used to correct for the PSF asymmetry (disabled for now).
A relative flux calibration of the spectra is performed internally.
Args:
psf : specter PSF
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
fibers : 1D np.array of fiber indices
spectrum_filename : path to input spectral file ( read with np.loadtxt , first column is wavelength (in vacuum and Angstrom) , second column in flux (arb. units)
Optional:
width : int, extraction boxcar width, default is 7
degyy : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.
Returns:
ycoef : 2D np.array of same shape as input, with modified Legendre coefficients for each fiber to convert wavelength to YCCD
"""
log = get_logger()
tmp=np.loadtxt(spectrum_filename).T
ref_wave=tmp[0]
ref_spectrum=tmp[1]
log.info("read reference spectrum in %s with %d entries"%(spectrum_filename,ref_wave.size))
log.info("rextract spectra with boxcar")
# boxcar extraction
boxcar_flux, boxcar_ivar, boxcar_wave = boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=fibers, width=width)
# resampling on common finer wavelength grid
flux, ivar, wave = resample_boxcar_frame(boxcar_flux, boxcar_ivar, boxcar_wave, oversampling=2)
# median flux used as internal spectral reference
mflux=np.median(flux,axis=0)
mivar=np.median(ivar,axis=0)*flux.shape[0]*(2./np.pi) # very approximate!
# trim ref_spectrum
i=(ref_wave>=wave[0])&(ref_wave<=wave[-1])
ref_wave=ref_wave[i]
ref_spectrum=ref_spectrum[i]
# check wave is linear or make it linear
if np.abs((ref_wave[1]-ref_wave[0])-(ref_wave[-1]-ref_wave[-2]))>0.0001*(ref_wave[1]-ref_wave[0]) :
log.info("reference spectrum wavelength is not on a linear grid, resample it")
dwave = np.min(np.gradient(ref_wave))
tmp_wave = np.linspace(ref_wave[0],ref_wave[-1],int((ref_wave[-1]-ref_wave[0])/dwave))
ref_spectrum = resample_flux(tmp_wave, ref_wave , ref_spectrum)
ref_wave = tmp_wave
try :
# compute psf at most significant line of ref_spectrum
i=np.argmax(ref_spectrum)
central_wave_for_psf_evaluation = ref_wave[i]
fiber_for_psf_evaluation = (boxcar_flux.shape[0]//2)
dwave=ref_wave[i+1]-ref_wave[i]
hw=int(3./dwave)+1 # 3A half width
wave_range = ref_wave[i-hw:i+hw+1]
x,y=psf.xy(fiber_for_psf_evaluation,wave_range)
x=np.tile(x[hw]+np.arange(-hw,hw+1)*(y[-1]-y[0])/(2*hw+1),(y.size,1))
y=np.tile(y,(2*hw+1,1)).T
kernel2d=psf._value(x,y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
kernel1d=np.sum(kernel2d,axis=1)
log.info("convolve reference spectrum using PSF at fiber %d and wavelength %dA"%(fiber_for_psf_evaluation,central_wave_for_psf_evaluation))
ref_spectrum=fftconvolve(ref_spectrum,kernel1d, mode='same')
except :
log.warning("couldn't convolve reference spectrum: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))
# resample input spectrum
log.info("resample convolved reference spectrum")
ref_spectrum = resample_flux(wave, ref_wave , ref_spectrum)
log.info("absorb difference of calibration")
x=(wave-wave[wave.size//2])/50.
kernel=np.exp(-x**2/2)
f1=fftconvolve(mflux,kernel,mode='same')
f2=fftconvolve(ref_spectrum,kernel,mode='same')
scale=f1/f2
ref_spectrum *= scale
log.info("fit shifts on wavelength bins")
# define bins
n_wavelength_bins = degyy+4
y_for_dy=np.array([])
dy=np.array([])
ey=np.array([])
wave_for_dy=np.array([])
for b in range(n_wavelength_bins) :
wmin=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*b
if b<n_wavelength_bins-1 :
wmax=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*(b+1)
else :
wmax=wave[-1]
ok=(wave>=wmin)&(wave<=wmax)
sw= np.sum(mflux[ok]*(mflux[ok]>0))
if sw==0 :
continue
dwave,err = compute_dy_from_spectral_cross_correlation(mflux[ok],wave[ok],ref_spectrum[ok],ivar=mivar[ok],hw=3.)
bin_wave = np.sum(mflux[ok]*(mflux[ok]>0)*wave[ok])/sw
x,y=psf.xy(fiber_for_psf_evaluation,bin_wave)
eps=0.1
x,yp=psf.xy(fiber_for_psf_evaluation,bin_wave+eps)
dydw=(yp-y)/eps
if err*dydw<1 :
dy=np.append(dy,-dwave*dydw)
ey=np.append(ey,err*dydw)
wave_for_dy=np.append(wave_for_dy,bin_wave)
y_for_dy=np.append(y_for_dy,y)
log.info("wave = %fA , y=%d, measured dwave = %f +- %f A"%(bin_wave,y,dwave,err))
if False : # we don't need this for now
try :
log.info("correcting bias due to asymmetry of PSF")
hw=5
oversampling=4
xx=np.tile(np.arange(2*hw*oversampling+1)-hw*oversampling,(2*hw*oversampling+1,1))/float(oversampling)
yy=xx.T
x,y=psf.xy(fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
dy_asym_central = np.sum(yy*prof)/np.sum(prof)
for i in range(dy.size) :
x,y=psf.xy(fiber_for_psf_evaluation,wave_for_dy[i])
prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,wave_for_dy[i])
dy_asym = np.sum(yy*prof)/np.sum(prof)
log.info("y=%f, measured dy=%f , bias due to PSF asymetry = %f"%(y,dy[i],dy_asym-dy_asym_central))
dy[i] -= (dy_asym-dy_asym_central)
except :
log.warning("couldn't correct for asymmetry of PSF: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))
log.info("polynomial fit of shifts and modification of PSF ycoef")
# pol fit
coef = np.polyfit(wave_for_dy,dy,degyy,w=1./ey**2)
pol = np.poly1d(coef)
for i in range(dy.size) :
log.info("wave=%fA y=%f, measured dy=%f+-%f , pol(wave) = %f"%(wave_for_dy[i],y_for_dy[i],dy[i],ey[i],pol(wave_for_dy[i])))
log.info("apply this to the PSF ycoef")
wave = np.linspace(wavemin,wavemax,100)
dy = pol(wave)
dycoef = legfit(legx(wave,wavemin,wavemax),dy,deg=ycoef.shape[1]-1)
for fiber in range(ycoef.shape[0]) :
ycoef[fiber] += dycoef
return ycoef
# end of routines for cross-correlation method for trace shifts
# beginning of routines for forward model method for trace shifts
def compute_fiber_bundle_trace_shifts_using_psf(fibers,line,psf,image,maxshift=2.) :
"""
Computes trace shifts along x and y from a preprocessed image, a PSF (with trace coords), and a given emission line,
by doing a forward model of the image.
Args:
fibers : 1D array with list of fibers
line : float, wavelength of an emission line (in Angstrom)
psf : specter psf object
image : DESI preprocessed image object
Optional:
maxshift : float maximum shift in pixels for 2D chi2 scan
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dx : 1D array of shifts along x coordinates on CCD
dy : 1D array of shifts along y coordinates on CCD
sx : 1D array of uncertainties on dx
sy : 1D array of uncertainties on dy
"""
log=get_logger()
#log.info("compute_fiber_bundle_offsets fibers={} line={}".format(fibers,line))
# get central coordinates of bundle for interpolation of offsets on CCD
x,y = psf.xy([int(np.median(fibers)),],line)
try :
nfibers=len(fibers)
# compute stamp coordinates
xstart=None
xstop=None
ystart=None
ystop=None
xs=[]
ys=[]
pix=[]
xx=[]
yy=[]
for fiber in fibers :
txs,tys,tpix = psf.xypix(fiber,line)
xs.append(txs)
ys.append(tys)
pix.append(tpix)
if xstart is None :
xstart =txs.start
xstop =txs.stop
ystart =tys.start
ystop =tys.stop
else :
xstart =min(xstart,txs.start)
xstop =max(xstop,txs.stop)
ystart =min(ystart,tys.start)
ystop =max(ystop,tys.stop)
# load stamp data, with margins to avoid problems with shifted psf
margin=int(maxshift)+1
stamp=np.zeros((ystop-ystart+2*margin,xstop-xstart+2*margin))
stampivar=np.zeros(stamp.shape)
stamp[margin:-margin,margin:-margin]=image.pix[ystart:ystop,xstart:xstop]
stampivar[margin:-margin,margin:-margin]=image.ivar[ystart:ystop,xstart:xstop]
# will use a fixed footprint despite changes of psf stamps
# so that chi2 always based on same data set
footprint=np.zeros(stamp.shape)
for i in range(nfibers) :
footprint[margin-ystart+ys[i].start:margin-ystart+ys[i].stop,margin-xstart+xs[i].start:margin-xstart+xs[i].stop]=1
#plt.imshow(footprint) ; plt.show() ; sys.exit(12)
# define grid of shifts to test
res=0.5
nshift=int(maxshift/res)
dx=res*np.tile(np.arange(2*nshift+1)-nshift,(2*nshift+1,1))
dy=dx.T
original_shape=dx.shape
dx=dx.ravel()
dy=dy.ravel()
chi2=np.zeros(dx.shape)
A=np.zeros((nfibers,nfibers))
B=np.zeros((nfibers))
mods=np.zeros(np.zeros(nfibers).shape+stamp.shape)
debugging=False
if debugging : # FOR DEBUGGING KEEP MODELS
models=[]
# loop on possible shifts
# refit fluxes and compute chi2
for d in range(len(dx)) :
# print(d,dx[d],dy[d])
A *= 0
B *= 0
mods *= 0
for i,fiber in enumerate(fibers) :
# apply the PSF shift
psf._cache={} # reset cache !!
psf.coeff['X']._coeff[fiber][0] += dx[d]
psf.coeff['Y']._coeff[fiber][0] += dy[d]
# compute pix and paste on stamp frame
xx, yy, pix = psf.xypix(fiber,line)
mods[i][margin-ystart+yy.start:margin-ystart+yy.stop,margin-xstart+xx.start:margin-xstart+xx.stop]=pix
# undo the PSF shift
psf.coeff['X']._coeff[fiber][0] -= dx[d]
psf.coeff['Y']._coeff[fiber][0] -= dy[d]
B[i] = np.sum(stampivar*stamp*mods[i])
for j in range(i+1) :
A[i,j] = np.sum(stampivar*mods[i]*mods[j])
if j!=i :
A[j,i] = A[i,j]
Ai=np.linalg.inv(A)
flux=Ai.dot(B)
model=np.zeros(stamp.shape)
for i in range(nfibers) :
model += flux[i]*mods[i]
chi2[d]=np.sum(stampivar*(stamp-model)**2)
if debugging :
models.append(model)
if debugging :
schi2=chi2.reshape(original_shape).copy() # FOR DEBUGGING
sdx=dx.copy()
sdy=dy.copy()
# find minimum chi2 grid point
k = chi2.argmin()
j,i = np.unravel_index(k, ((2*nshift+1),(2*nshift+1)))
#print("node dx,dy=",dx.reshape(original_shape)[j,i],dy.reshape(original_shape)[j,i])
# cut a region around minimum
delta=1
istart=max(0,i-delta)
istop=min(2*nshift+1,i+delta+1)
jstart=max(0,j-delta)
jstop=min(2*nshift+1,j+delta+1)
chi2=chi2.reshape(original_shape)[jstart:jstop,istart:istop].ravel()
dx=dx.reshape(original_shape)[jstart:jstop,istart:istop].ravel()
dy=dy.reshape(original_shape)[jstart:jstop,istart:istop].ravel()
# fit 2D polynomial of deg2
m = np.array([dx*0+1, dx, dy, dx**2, dy**2, dx*dy ]).T
c, r, rank, s = np.linalg.lstsq(m, chi2)
if c[3]>0 and c[4]>0 :
# get minimum
# dchi2/dx=0 : c[1]+2*c[3]*dx+c[5]*dy = 0
# dchi2/dy=0 : c[2]+2*c[4]*dy+c[5]*dx = 0
a=np.array([[2*c[3],c[5]],[c[5],2*c[4]]])
b=np.array([c[1],c[2]])
t=-np.linalg.inv(a).dot(b)
dx=t[0]
dy=t[1]
sx=1./np.sqrt(c[3])
sy=1./np.sqrt(c[4])
#print("interp dx,dy=",dx,dy)
if debugging : # FOR DEBUGGING
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(2,2,1,title="chi2")
plt.imshow(schi2,extent=(-nshift*res,nshift*res,-nshift*res,nshift*res),origin=0,interpolation="nearest")
plt.plot(dx,dy,"+",color="white",ms=20)
plt.xlabel("x")
plt.ylabel("y")
plt.subplot(2,2,2,title="data")
plt.imshow(stamp*footprint,origin=0,interpolation="nearest")
plt.grid()
k0=np.argmin(sdx**2+sdy**2)
plt.subplot(2,2,3,title="original psf")
plt.imshow(models[k0],origin=0,interpolation="nearest")
plt.grid()
plt.subplot(2,2,4,title="shifted psf")
plt.imshow(models[k],origin=0,interpolation="nearest")
plt.grid()
plt.show()
else :
log.warning("fit failed (bad chi2 surf.) for fibers [%d:%d] line=%dA"%(fibers[0],fibers[-1]+1,int(line)))
dx=0.
dy=0.
sx=10.
sy=10.
except LinAlgError :
log.warning("fit failed (masked or missing data) for fibers [%d:%d] line=%dA"%(fibers[0],fibers[-1]+1,int(line)))
dx=0.
dy=0.
sx=10.
sy=10.
return x,y,dx,dy,sx,sy
def compute_dx_dy_using_psf(psf,image,fibers,lines) :
"""
Computes trace shifts along x and y from a preprocessed image, a PSF (with trace coords), and a set of emission lines,
by doing a forward model of the image.
Calls compute_fiber_bundle_trace_shifts_using_psf.
Args:
psf : specter psf object
image : DESI preprocessed image object
fibers : 1D array with list of fibers
lines : 1D array of wavelength of emission lines (in Angstrom)
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dx : 1D array of shifts along x coordinates on CCD
dy : 1D array of shifts along y coordinates on CCD
sx : 1D array of uncertainties on dx
sy : 1D array of uncertainties on dy
fiber : 1D array of fiber ID
wave : 1D array of wavelength
"""
log = get_logger()
nlines=len(lines)
nfibers=len(fibers)
log.info("computing spots coordinates and define bundles")
x=np.zeros((nfibers,nlines))
y=np.zeros((nfibers,nlines))
# load expected spots coordinates
for fiber in range(nfibers) :
for l,line in enumerate(lines) :
x[fiber,l],y[fiber,l] = psf.xy(fiber,line)
bundle_fibers=[]
bundle_xmin=[]
bundle_xmax=[]
xwidth=9.
bundle_xmin.append(x[0,nlines//2]-xwidth/2)
bundle_xmax.append(x[0,nlines//2]+xwidth/2)
bundle_fibers.append([0,])
for fiber in range(1,nfibers) :
tx=x[fiber,nlines//2]
found=False
for b in range(len(bundle_fibers)) :
if tx+xwidth/2 >= bundle_xmin[b] and tx-xwidth/2 <= bundle_xmax[b] :
found=True
bundle_fibers[b].append(fiber)
bundle_xmin[b]=min(bundle_xmin[b],tx-xwidth/2)
bundle_xmax[b]=max(bundle_xmax[b],tx+xwidth/2)
break
if not found :
bundle_fibers.append([fiber,])
bundle_xmin.append(tx-xwidth/2)
bundle_xmax.append(tx+xwidth/2)
log.info("measure offsets dx dy per bundle ({}) and spectral line ({})".format(len(bundle_fibers),len(lines)))
wave_xy=np.array([]) # line
fiber_xy=np.array([]) # central fiber in bundle
x=np.array([]) # central x in bundle at line wavelength
y=np.array([]) # central x in bundle at line wavelength
dx=np.array([]) # measured offset along x
dy=np.array([]) # measured offset along y
ex=np.array([]) # measured offset uncertainty along x
ey=np.array([]) # measured offset uncertainty along y
for b in range(len(bundle_fibers)) :
for l,line in enumerate(lines) :
tx,ty,tdx,tdy,tex,tey = compute_fiber_bundle_trace_shifts_using_psf(fibers=bundle_fibers[b],psf=psf,image=image,line=line)
log.info("fibers [%d:%d] %dA dx=%4.3f+-%4.3f dy=%4.3f+-%4.3f"%(bundle_fibers[b][0],bundle_fibers[b][-1]+1,int(line),tdx,tex,tdy,tey))
if tex<1. and tey<1. :
wave_xy=np.append(wave_xy,line)
fiber_xy=np.append(fiber_xy,int(np.median(bundle_fibers[b])))
x=np.append(x,tx)
y=np.append(y,ty)
dx=np.append(dx,tdx)
dy=np.append(dy,tdy)
ex=np.append(ex,tex)
ey=np.append(ey,tey)
return x,y,dx,ex,dy,ey,fiber_xy,wave_xy
# end of routines for forward model method
def monomials(x,y,degx,degy) :
"""
Computes monomials as a function of x and y of a 2D polynomial of degrees degx and degy
Args:
x : ND array
y : ND array of same shape as x
degx : int (>=0), polynomial degree along x
degy : int (>=0), polynomial degree along y
Returns :
monomials : ND array of shape ( (degx+1)*(degy+1) , x shape )
"""
M=[]
for i in range(degx+1) :
for j in range(degy+1) :
M.append(x**i*y**j)
return np.array(M)
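# A quick illustration of monomials: for degx=1 and degy=2 it returns the six
# terms [1, y, y^2, x, x*y, x*y^2] evaluated on the inputs, with shape
# ((degx+1)*(degy+1),) + x.shape. The values are arbitrary examples.
def _demo_monomials():
    x = np.array([0., 1., 2.])
    y = np.array([1., 1., 1.])
    M = monomials(x, y, degx=1, degy=2)
    print(M.shape)  # (6, 3)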
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import re
# STEP0. CALIBRATION BOARD BEFORE CAMERA CALIBRATION
images = glob.glob("camera_cal/calibration*.jpg")
# Chess board (9,6)
objpoints = []
imgpoints = []
objp = np.zeros((6*9, 3), np.float32)
objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
if ret == True:
imgpoints.append(corners)
objpoints.append(objp)
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
def cal_undistort(img, objpoints, imgpoints):
img_size = (img.shape[1], img.shape[0]) # x, y
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
return undist
def color_thresholding(img, threshold=(0,255), opt=("rgb")):
# read using mpimg as R.G.B
img_in = np.copy(img)
if (opt == "rgb"):
rgb = img_in
r_channel = rgb[:,:,0]
g_channel = rgb[:,:,1]
b_channel = rgb[:,:,2]
r_binary = np.zeros_like(r_channel)
r_channel = cv2.equalizeHist(r_channel)
r_binary[(r_channel >= threshold[0]) & (r_channel <= threshold[1])]=1
return r_binary
elif (opt == "hls"):
hls = cv2.cvtColor(img_in, cv2.COLOR_RGB2HLS)
h_channel = hls[:,:,0]
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
s_binary = np.zeros_like(s_channel)
s_channel = cv2.equalizeHist(s_channel)
s_binary[(s_channel >= threshold[0]) & (s_channel <= threshold[1])]=1
return s_binary
else:
return img_in
def gradient_thresholding(img, threshold=(0,255), opt=("comb")):
# read using mpimg as R.G.B
img_in = np.copy(img)
gray= cv2.cvtColor(img_in, cv2.COLOR_RGB2GRAY)
gray = cv2.equalizeHist(gray)
img_sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=3)
img_sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=3)
abs_sobelx = np.absolute(img_sobel_x)
abs_sobely = np.absolute(img_sobel_y)
scaled_sobelx = np.uint8(
255*abs_sobelx / np.max(abs_sobelx)
)
scaled_sobely = np.uint8(
255*abs_sobely / np.max(abs_sobely)
)
img_sobel_xy = np.sqrt(img_sobel_x**2 + img_sobel_y**2)
scaled_sobelxy = np.uint8(
255*img_sobel_xy / np.max(img_sobel_xy)
)
direction = np.arctan2(abs_sobelx, abs_sobely)
if (opt == "comb"):
binary_comb = np.zeros_like(scaled_sobelxy)
binary_comb[
(scaled_sobelxy >= threshold[0]) & (scaled_sobelxy <= threshold[1])
]=1
return binary_comb
elif (opt == "x"):
binary_x = np.zeros_like(scaled_sobelx)
binary_x[
(scaled_sobelx >= threshold[0]) & (scaled_sobelx <= threshold[1])
]=1
return binary_x
elif (opt == "y"):
binary_y = np.zeros_like(scaled_sobely)
binary_y[
(scaled_sobely >= threshold[0]) & (scaled_sobely <= threshold[1])
]=1
return binary_y
elif (opt =="dir"):
binary_dir = np.zeros_like(direction)
binary_dir[
(direction >= threshold[0]) & (direction <= threshold[1])
]=1
return binary_dir
else:
return img_in
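# A sketch of combining the colour and gradient thresholds into one binary
# mask, as is typical for this kind of lane pipeline; the threshold values
# are illustrative assumptions and img is assumed to be an RGB uint8 image.
def combined_threshold(img):
    s_binary = color_thresholding(img, threshold=(170, 255), opt="hls")
    sobelx_binary = gradient_thresholding(img, threshold=(30, 100), opt="x")
    combined = np.zeros_like(s_binary)
    combined[(s_binary == 1) | (sobelx_binary == 1)] = 1
    return combined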
def perspective_img(image, region_rect, mode=("normal")):
img_in = np.copy(image)
x_len = img_in.shape[1]
y_len = img_in.shape[0]
if (mode == "normal"):
src_pts = np.array(region_rect)
margin=50
warp_rect = np.array([[margin, margin] ,[x_len-margin, margin], [x_len-margin, y_len-margin], [margin, y_len-margin]], np.float32)
out_pts = np.array(warp_rect)
else: #inverse
margin=50
warp_rect = np.array([[margin, margin] ,[x_len-margin, margin], [x_len-margin, y_len-margin], [margin, y_len-margin]], np.float32)
src_pts = np.array(warp_rect)
out_pts = np.array(region_rect)
M = cv2.getPerspectiveTransform(src_pts, out_pts)
warp = cv2.warpPerspective(img_in, M, (x_len, y_len))
return warp
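# An illustrative call of perspective_img; the source trapezoid below is an
# assumed region of interest for a 1280x720 road image, not a calibrated one.
def _demo_perspective(img):
    region_rect = np.array([[580, 460], [700, 460], [1040, 680], [260, 680]], np.float32)
    warped = perspective_img(img, region_rect, mode="normal")
    restored = perspective_img(warped, region_rect, mode="inverse")
    return warped, restored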
def fit_polynomial(img_shape, leftx, lefty, rightx, righty):
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
try: # 2nd-order polynomial fitted using np.polyfit
left_fit_coef = np.polyfit(lefty,leftx,2)
right_fit_coef = np.polyfit(righty, rightx, 2)
left_fitx= left_fit_coef[0]*ploty**2 + left_fit_coef[1]*ploty + left_fit_coef[2]
right_fitx = right_fit_coef[0]*ploty**2 + right_fit_coef[1]*ploty + right_fit_coef[2]
except TypeError:
left_fitx = ploty
right_fitx = ploty
left_fit_coef = None
right_fit_coef = None
return left_fitx, right_fitx, ploty, left_fit_coef, right_fit_coef
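# A small synthetic check of fit_polynomial: pixels drawn from two known
# parabolas should give back roughly the same coefficients; all numbers are
# illustrative.
def _demo_fit_polynomial():
    ploty = np.linspace(0, 719, 720)
    leftx = 2e-4 * ploty ** 2 + 0.1 * ploty + 200
    rightx = 2e-4 * ploty ** 2 + 0.1 * ploty + 900
    _, _, _, left_coef, right_coef = fit_polynomial((720, 1280), leftx, ploty, rightx, ploty)
    print(left_coef)   # approximately [2e-4, 0.1, 200]
    print(right_coef)  # approximately [2e-4, 0.1, 900]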
def search_around_poly(binary_warp, init_tune):
binary_warped = np.copy(binary_warp)
margin = 100
nonzero = binary_warped.nonzero() # nonzero index return!
nonzerox = np.array(nonzero[1])
nonzeroy = np.array(nonzero[0])
left_lane_inds = (
nonzerox
> (
init_tune[0][0] * (nonzeroy) ** 2
+ init_tune[0][1] * nonzeroy
+ init_tune[0][2]
- margin
)
) & (
nonzerox
< (
init_tune[0][0] * (nonzeroy) ** 2
+ init_tune[0][1] * nonzeroy
+ init_tune[0][2]
+ margin
)
)
right_lane_inds = (
nonzerox
> (
init_tune[1][0] * (nonzeroy) ** 2
+ init_tune[1][1] * nonzeroy
+ init_tune[1][2]
- margin
)
) & (
nonzerox
< (
init_tune[1][0] * (nonzeroy) ** 2
+ init_tune[1][1] * nonzeroy
+ init_tune[1][2]
+ margin
)
)
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
left_fitx, right_fitx, ploty, left_fit_coef, right_fit_coef = fit_polynomial(binary_warped.shape, leftx, lefty, rightx, righty)
left_right_coeff = np.array([left_fit_coef, right_fit_coef])
## VISUALIZATION FOR TESTING
#plt.plot(left_fitx, ploty, color='yellow')
#plt.plot(right_fitx, ploty, color='blue')
return binary_warped, left_fitx, right_fitx, ploty, left_right_coeff
def measure_curvature(image, left_fitx, right_fitx, ploty, ratio=(1,1)):
# Image x size
img_x_size = image.shape[1]
left_fit_cr = np.polyfit(ploty * ratio[1], left_fitx * ratio[0], 2)
right_fit_cr = np.polyfit(ploty * ratio[1], right_fitx * ratio[0], 2)
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
left_curverad = (
1 + (2 * left_fit_cr[0] * y_eval * ratio[1] + left_fit_cr[1]) ** 2
) ** 1.5 / np.absolute(2 * left_fit_cr[0])
right_curverad = (
1 + (2 * right_fit_cr[0] * y_eval * ratio[1] + right_fit_cr[1]) ** 2
) ** 1.5 / np.absolute(2 * right_fit_cr[0])
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import TruncatedSVD
import pandas as pd
import nltk
import re
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from trafilatura import bare_extraction
import trafilatura
import discord
nltk.download('punkt')
nltk.download('stopwords')
def scoreSent(sent, scoreMatrix, scoreCol):
score = 0
for word in sent.split(" "):
if word in scoreMatrix.index:
filt = scoreMatrix.filter(items=[word], axis='index')
score += filt[scoreCol].values[0]
return score/len(sent)
def filterStopwords(sent):
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(sent)
return " ".join([w for w in word_tokens if w not in stop_words])
def getSummarySpread(filePath, numSent):
f = open(filePath, "r")
text = f.read()
text = " ".join(text.split())
for i in range(100):
text = text.replace("[" + str(i) + "]", "")
doc = nltk.tokenize.sent_tokenize(text)
docFilt = [filterStopwords(s) for s in doc]
vectorizer = CountVectorizer()
bag_of_words = vectorizer.fit_transform(docFilt)
svd = TruncatedSVD(n_components = numSent)
lsa = svd.fit_transform(bag_of_words)
col = ["topic" + str(i) for i in range(numSent)]
absCol = ["abs_topic" + str(i) for i in range(numSent)]
topic_encoded_df = pd.DataFrame(lsa, columns=col)
topic_encoded_df["docFilt"] = docFilt
topic_encoded_df["doc"] = doc
dictionary = vectorizer.get_feature_names_out()
encoding_matrix=pd.DataFrame(svd.components_,index=col,columns=dictionary).T
for i in range(numSent):
encoding_matrix[absCol[i]] = np.abs(encoding_matrix[col[i]])
import numpy
from matplotlib import pyplot
def euler(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
y[:,0] = y0
for n in range(N):
y[:,n+1] = y[:,n] + dx * f(x[n], y[:,n])
return x, dx, y
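# A quick convergence sketch for the forward Euler scheme above: doubling N
# (halving dx) should roughly halve the error, reflecting first-order
# accuracy. The test problem y' = -sin(x), y(0) = 1 (exact solution cos(x))
# is illustrative.
def _demo_euler_convergence():
    def f_sin(x, y):
        return -numpy.sin(x)
    for N in (50, 100, 200):
        x, dx, y = euler(f_sin, 0.5, [1], N)
        print("N=%d dx=%.4f error=%.2e" % (N, dx, abs(y[0, -1] - numpy.cos(0.5))))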
def euler_pc(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
y[:,0] = y0
for n in range(N):
fn = f(x[n], y[:,n])
yp = y[:,n] + dx * fn
y[:,n+1] = y[:,n] + dx / 2 * (fn + f(x[n+1], yp))
return x, dx, y
def rk4(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
y[:,0] = y0
for n in range(N):
k1 = dx * f(x[n] , y[:,n] )
k2 = dx * f(x[n] + dx / 2, y[:,n] + k1 / 2)
k3 = dx * f(x[n] + dx / 2, y[:,n] + k2 / 2)
k4 = dx * f(x[n] + dx , y[:,n] + k3 )
y[:,n+1] = y[:,n] + (k1 + 2 * (k2 + k3) + k4) / 6
return x, dx, y
if __name__=="__main__":
def f_sin(x, y):
return -numpy.sin(x)
print("Euler Predictor-Corrector")
x, dx, y = euler_pc(f_sin, 0.5, [1], 5)
print("dx=", dx, "y(0.5)=", y[0,-1])
x, dx, y = euler_pc(f_sin, 0.5, [1], 50)
print("dx=", dx, "y(0.5)=", y[0,-1])
Npoints = 5*2**numpy.arange(1,10)
dx_all = 0.5/Npoints
errors = numpy.zeros_like(dx_all)
for i, N in enumerate(Npoints):
x, dx, y = euler_pc(f_sin, 0.5, [1], N)
errors[i] = abs(y[0,-1] - numpy.cos(0.5))
dx_all[i] = dx
pyplot.figure(figsize=(12,6))
pyplot.loglog(dx_all, errors, 'kx')
pyplot.loglog(dx_all, errors[0]*(dx_all/dx_all[0])**2, 'b-',
label=r"$\propto \Delta x^2$")
pyplot.legend(loc='upper left')
pyplot.xlabel(r"$\Delta x$")
pyplot.ylabel("Error")
pyplot.show()
def f_circle(x, y):
dydx = numpy.zeros_like(y)
dydx[0] = -y[1]
dydx[1] = y[0]
return dydx
y0 = numpy.array([1, 0])
x, dx, y = euler_pc(f_circle, 50, y0, 500)
pyplot.figure(figsize=(8,8))
pyplot.plot(y[0,:], y[1,:])
pyplot.show()
print("RK4")
x, dx, y = rk4(f_sin, 0.5, [1], 5)
print("dx=", dx, "y(0.5)=", y[0,-1])
x, dx, y = rk4(f_sin, 0.5, [1], 50)
print("dx=", dx, "y(0.5)=", y[0,-1])
Npoints = 5*2**numpy.arange(1,10)
dx_all = 0.5/Npoints
errors = numpy.zeros_like(dx_all)
for i, N in enumerate(Npoints):
x, dx, y = rk4(f_sin, 0.5, [1], N)
errors[i] = abs(y[0,-1] - numpy.cos(0.5))
dx_all[i] = dx
pyplot.figure(figsize=(12,6))
pyplot.loglog(dx_all, errors, 'kx')
pyplot.loglog(dx_all, errors[0]*(dx_all/dx_all[0])**4, 'b-',
label=r"$\propto \Delta x^4$")
pyplot.legend(loc='upper left')
pyplot.xlabel(r"$\Delta x$")
pyplot.ylabel("Error")
pyplot.show()
y0 = numpy.array([1, 0])
""" Utility module with various helper functions and subplot generaters"""
import pandas as pd
import numpy as np
import os
import sys
import re
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
from matplotlib.colors import is_color_like
from scipy.cluster.hierarchy import distance
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import optimal_leaf_ordering
from scipy.spatial.distance import pdist
import DPre.main.config as config
from DPre.main._logger import logger, spacer
def _add_mg_types(data, down):
"""Add markergene type index (up and down) to columns at level 0"""
orig_order = data.columns.unique(0)
updown_idx = ['up']*data.shape[1]
if down:
updown_idx.extend(['down']*data.shape[1])
data = pd.concat((data, data), axis=1)
data.columns = _add_level(data.columns, updown_idx)
return data.reindex(orig_order, axis=1, level=1)
def _bool_to_int_genes(_diff, trans_updown=True, return_merged=False):
"""Take _diff input and convert up-genes to +1, down-genes to -1, optionally
transfer up- and down values to each other makeing up- and down subframes
equal"""
int_diff = _diff.astype(int)
if 'down' in _diff.columns.unique(0):
int_diff['down'] *= -1
if trans_updown and 'down' in _diff.columns.unique(0):
int_diff['up'] = int_diff['up'].mask(_diff['down'], -1)
int_diff['down'] = int_diff['down'].mask(_diff['up'], 1)
if not return_merged:
return int_diff
else:
return int_diff.xs('up', 1, 0)
def _add_mgtmean(agg):
"""Prodcue the mean between aggregated up- and down mg similarity values"""
agg_mean = agg.groupby(axis=1, level=1, sort=False).mean()
agg_mean.columns = _add_level(agg_mean.columns, 'mean')
return pd.concat([agg, agg_mean], axis=1)
def _add_log2_z(expr, rowwise_sd=False):
"""Compute log2 and z-transformed expression data. Substitute read count in
expression. Optionally, compute the standad deviation row(gene)-wise.
Is used for large datasets like reference transcriptome libraries.
"""
expr = np.log2(expr +1)
expr.columns = _add_level(expr.columns, 'log2', at=1)
m = expr.values.mean()
s = expr.values.std() if not rowwise_sd else expr.std(1)
z_expr = expr.apply(lambda c: (c-m) /s)
z_expr.columns = _add_level(z_expr.columns, 'z', 1)
return pd.concat((expr, z_expr), axis=1).reindex(expr.columns.unique(0),
axis=1, level=0)
def _add_level(index, label, at=0, replace=False, name=''):
"""Add a level with labels 'label' to a pd.MultiIndex"""
index = pd.DataFrame(index=index)
if replace:
index.reset_index(level=at, drop=True, inplace=True)
index[name] = label
order = list(range(index.index.nlevels))
order.insert(at, -1)
return index.set_index(name, append=True).reorder_levels(order).index
def _get_gene_ann(species):
"""Open the gene annotation reference file (mouse/ human) and return it"""
path = os.path.dirname(__file__)
if species == 'mouse':
return pd.read_pickle(path + '/../gene_ann/mg_ensembl96_GRCm38.p6.gzip')
elif species == 'human':
return pd.read_pickle(path + '/../gene_ann/hg_GRCh38.p12.gzip')
else:
logger.info('')
logger.error('Invalid input for species: `{}`. Valid are `mouse` and '
'`human`'.format(species))
sys.exit(1)
def annotate(ensgs, species):
""" Annotate mouse or human ensg keys. Return the gene names.
DPre references the ensembl gene annotation v.96 located at
DPre/gene_ann.
Args:
ensgs (list, pandas.Index): The collection of ensg keys to annotate
species (str): The origin species of the genes, 'mouse' or 'human'.
Returns:
annotated pandas.Index
"""
ref = _get_gene_ann(species)
try:
return pd.Index(ref.reindex(ensgs).name.values)
except Exception as e:
        logger.error('{}\nDPre references the ensembl gene annotation v.96. '
'Differently annotated datasets may cause problems.'
.format(e))
sys.exit(1)
def get_ensgs(names, species):
""" Return the ensg keys for a list of gene names.
DPre references the ensembl gene annotation v.96 located at
DPre/gene_ann. If a gene name has multiple ensg keys, this gene will appear
last in the DataFrame regardless of the input order.
Args:
names (list pandas.Index): The collection of names to return ensg keys
for
species (str): The origin species of the genes, 'mouse' or 'human'.
Returns:
pandas.Index of ensg keys
"""
ref = _get_gene_ann(species)
try:
ann = ref.reindex(ref.index[ref.name.isin(names)]).reset_index()
if ann.name.duplicated().any():
dupl = pd.Index(ann.name).duplicated()
ann_dr = ann[~dupl]
ann_du = ann[dupl]
ann_dr = ann_dr.set_index('name').reindex(names).reset_index()
ann_dr.rename({'index': 'name'}, axis=1, inplace=1)
ann = ann_dr.append(ann_du, sort=False)
ann.index = np.arange(ann.shape[0])
else:
ann = ann.set_index('name').reindex(names).reset_index()
ann.rename({'index': 'name'}, axis=1, inplace=1)
return ann
except Exception as e:
        logger.error('{}\nDPre references the ensembl gene annotation v.96. '
'Differently annotated datasets may cause problems.'
.format(e))
sys.exit(1)
def _align_indices(data, order, axis=1):
"""Align the indices/ columns in a collection of pandas objects to order"""
for i in range(len(data)):
if data[i] is not None:
data[i] = data[i].reindex(order, axis=axis)
return data
def _init_figure(fig_widths, fig_heights, nplts, spacers):
"""Calculate the size proportion of each plot element, create figure"""
width, height = sum(fig_widths), sum(fig_heights)
ratio = {'width_ratios': list(map(lambda w: w/width,
fig_widths[1:-2])),
'height_ratios': list(map(lambda h: h/height,
fig_heights[1:-2]))}
# init figure
fig, axes = plt.subplots(*nplts, figsize=(width, height),
gridspec_kw=ratio)
if not isinstance(axes, np.ndarray):
axes = np.array([axes])
axes = _clean_axes(axes)
wspace_prop = spacers[0] /np.array(fig_widths[1:-2]).mean()
hspace_prop = spacers[1] /np.array(fig_heights[1:-2]).mean()
adj_args = {'left': fig_widths[0] /width,
'wspace': wspace_prop,
'right': 1 - fig_widths[-1] /width,
'top': 1 - fig_heights[0] /height,
'hspace': hspace_prop,
'bottom': fig_heights[-1] /height}
fig.subplots_adjust(**adj_args)
return fig, axes
def _open_file(filename):
"""Open a file based on the filename ending or if not present
on config.SAVE_FORMAT. Must be supporte by matplotlib."""
valid = plt.figure().canvas.get_supported_filetypes()
if not any([filename.endswith(val_format) for val_format in valid]):
if config.SAVE_FORMAT in valid:
filename += '.' + config.SAVE_FORMAT
else:
logger.error('The value for config.SAVE_FORMAT `{}` is not '
'supported by matplotlib. Valid formats are:\n{}'
.format(config.SAVE_FORMAT, ', '.join(list(valid.keys()))))
sys.exit(1)
if filename.endswith('.pdf'):
return filename, PdfPages(filename)
else:
return filename, None
def _save_file(fig, filename=None, pp=None, close_pp=False):
"""Save pdf if pp is passed, otherwise use filename to save as
config.SAVE_FORMAT"""
if pp:
fig.savefig(pp, format='pdf')
if close_pp:
pp.close()
plt.close(fig)
elif filename:
replace = ['$\\mathit{', '}$']
for repl in replace:
filename = filename.replace(repl, '')
fig.savefig(filename)
plt.close(fig)
def _clean_axes(axes):
"""Remove all spines, ticks and tickalabels"""
np.array([axes])
for ax in axes.flatten():
[s.set_visible(False) for s in ax.spines.values()]
ax.tick_params(bottom=False, left=False, labelbottom=False,
labelleft=False)
return axes
def _make_title(differential, metric, el1, el2, pref='', postf=''):
"""Produce the plot title based on plot parmaters, pref and posf are used
for plot specific adjustments; return the title string"""
metric_title = 'metric: '
if metric == 'euclid':
metric_title += 'L1 Euclidean distance'
elif metric == 'pearson':
metric_title += 'Pearson correlation'
elif metric == 'cosine':
metric_title += 'cosine similarity'
elif metric == 'intersect':
metric_title += 'marker gene intersect'
if differential:
dtype = 'Change in '
else:
dtype = 'Absolute '
if pref:
dtype = dtype.lower()
title = ('{}{}{}transcriptional similarity \nof {} & {}\n{}'
.format(pref, dtype, postf, el1, el2, metric_title))
return title[0].upper() + title[1:]
def _heatmap_cluster(dat, where, ax, metric):
"""Cluster the columns or index with scipy; return the new order"""
ax.set_visible(True)
d = dat.T if metric == 'columns' else dat
Y = pdist(d, metric='euclidean')
Z = linkage(Y, method='complete', metric='euclidean')
order = dendrogram(Z,
count_sort = True,
no_labels = True,
orientation = where,
labels = d.index,
above_threshold_color = config.dendrogram_colors[0],
ax = ax)['ivl']
if metric == 'rows':
# for some reason reversed?
order = order[::-1]
return order
def _plot_distance_bar(axes, data, ctrl_lbl, bar_args, draw_colorbar=False,
cb_lbl=None, fig=None, pivot=None, w=None, h=None):
"""Draw the distance bar on top of the heatmap"""
# set ylabel on the left
axes[0].tick_params(labelleft=True)
axes[0].set_ylim(0, 1)
axes[0].set_yticks((.5,))
axes[0].set_yticklabels((ctrl_lbl,), x=.5)
# draw the heatmap
ax = axes[1]
[s.set_visible(True) for s in ax.spines.values()]
im = ax.imshow(data.values, aspect='auto', **bar_args)
# setup the colorbar legend
if draw_colorbar:
at = (config.CB_LEFT_SEC/w, 1- config.CB_TOP/h, config.CB_WIDTH/w,
config.CB_HEIGHT/h)
cb = ax.figure.colorbar(im, cax=fig.add_axes(at), alpha =.3,
orientation='horizontal')
bar_ticks = (bar_args['vmin'], bar_args['vmax'])
cb.set_ticks(bar_ticks)
cb.ax.set_xticklabels(bar_ticks)
if pivot:
cb.ax.tick_params(labelrotation=90)
cb.ax.set_xlabel(cb_lbl)
cb.ax.get_xaxis().set_label_position('top')
def _setup_heatmap_xy(x_y, ax, lbls, pivot, hide_lbls, lbl_size, colors):
"""Setting all paramters for the x- and y axis of the two heatmap plots"""
dim = len(lbls)
if x_y == 'x':
# X-axis setup, colorbar bottom
ax.set_xlim(0, dim)
        ticks = np.arange(.5, dim)
import pylab # type: ignore
from numpy.random import RandomState # type: ignore
from sklearn.cluster import KMeans # type: ignore
import numpy # type: ignore
import confusion_matrix
# ## Debug values, replace with actuals
a = numpy.zeros(shape=(10, 100))
a2 = numpy.zeros(shape=(10, 100))
lines = None
lines2 = None
# ################################################################################
#
# ## dist of label counts
#
def get_cluster_counts(emb_matrix, lines, n_clusters, rng):
kmeans = KMeans(n_clusters=n_clusters, random_state=rng).fit(emb_matrix)
res = kmeans.predict(emb_matrix)
label_count_sen = [0 for i in range(len(kmeans.cluster_centers_))]
label_count_both = [0 for i in range(len(kmeans.cluster_centers_))]
for i in range(len(res)):
l = res[i]
line = lines[i]
if line.ground_truth == 4:
label_count_sen[l] += 1
label_count_both[l] += 1
return sorted(label_count_both)
n_clusters = 35
y1 = get_cluster_counts(a, lines, n_clusters, RandomState(2))
y2 = get_cluster_counts(a, lines, n_clusters, RandomState(387))
y3 = get_cluster_counts(a, lines, n_clusters, RandomState(910))
y4 = get_cluster_counts(a, lines, n_clusters, RandomState(2094))
import os
import numpy as np
import scipy
from pyglm.utils import fftconv
def create_basis(prms):
""" Create a basis for impulse response functions
"""
type = prms['type'].lower()
if type == 'exp':
basis = create_exp_basis(prms)
elif type == 'cosine':
basis = create_cosine_basis(prms)
elif type == 'gaussian':
basis = create_gaussian_basis(prms)
elif type == 'identity' or type == 'eye':
basis = create_identity_basis(prms)
elif type == 'file':
if os.path.exists(prms["fname"]):
basis = load_basis_from_file(prms['fname'])
else:
raise Exception("Unrecognized basis type: %s", type)
return basis
def load_basis_from_file(prms):
"""
Load a basis from a file
"""
fname = prms["fname"]
if not os.path.exists(prms["fname"]):
raise Exception("Invalid basis file: %s", fname)
bas_dict = scipy.io.loadmat(fname)
if "basis" not in bas_dict.keys():
raise Exception("Invalid basis file: %s", fname)
basis = bas_dict["basis"]
#if T_max is not None:
# # Interpolate the basis at T_max evenly spaced points
# (t_bas,n_bas) = basis.shape
# cur_tt = np.linspace(0,1,t_bas)
# new_tt = np.linspace(0,1,T_max)
#
# new_basis = np.zeros((T_max,n_bas))
# for b in np.arange(n_bas):
# new_basis[:,b] = np.interp(new_tt,
# cur_tt,
# basis[:,b])
#
# basis = new_basis
return basis
def create_cosine_basis(prms):
"""
Create a basis of raised cosine tuning curves
"""
# Set default parameters. These can be overriden by kwargs
#prms = {'n_eye' : 0,
# 'n_cos' : 3,
# 'a': 1.0/120,
# 'b': 0.5,
# 'orth' : False,
# 'norm' : True}
#prms.update(kwargs)
n_pts = 100 # Number of points at which to evaluate the basis
n_cos = prms['n_cos'] # Number of cosine basis functions
n_eye = prms['n_eye'] # Number of identity basis functions
n_bas = n_eye + n_cos
basis = np.zeros((n_pts,n_bas))
# The first n_eye basis elements are identity vectors in the first time bins
basis[:n_eye,:n_eye] = np.eye(n_eye)
# The remaining basis elements are raised cosine functions with peaks
# logarithmically warped between [n_eye*dt:dt_max].
a = prms['a'] # Scaling in log time
b = prms['b'] # Offset in log time
nlin = lambda t: np.log(a*t+b) # Nonlinearity
u_ir = nlin(np.arange(n_pts)) # Time in log time
    ctrs = u_ir[np.floor(np.linspace(n_eye,(n_pts/2.0),n_cos)).astype(int)]
if len(ctrs) == 1:
w = ctrs/2
else:
w = (ctrs[-1]-ctrs[0])/(n_cos-1) # Width of the cosine tuning curves
# Basis function is a raised cosine centered at c with width w
basis_fn = lambda u,c,w: (np.cos(np.maximum(-np.pi,np.minimum(np.pi,(u-c)*np.pi/w/2.0)))+1)/2.0
for i in np.arange(n_cos):
basis[:,n_eye+i] = basis_fn(u_ir,ctrs[i],w)
# Orthonormalize basis (this may decrease the number of effective basis vectors)
if prms['orth']:
basis = scipy.linalg.orth(basis)
if prms['norm']:
# We can only normalize nonnegative bases
if np.any(basis<0):
raise Exception("We can only normalize nonnegative impulse responses!")
# Normalize such that \int_0^1 b(t) dt = 1
basis = basis / np.tile(np.sum(basis,axis=0), [n_pts,1]) / (1.0/n_pts)
return basis
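# Example (illustrative only; these parameter values are assumptions based on
# the commented defaults above, not values shipped with the library):
if __name__ == "__main__":
    _example_prms = {'n_eye': 0, 'n_cos': 3, 'a': 1.0/120, 'b': 0.5,
                     'orth': False, 'norm': True}
    print(create_cosine_basis(_example_prms).shape)  # (100, 3)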
def create_exp_basis(prms):
"""
Create a basis of exponentially decaying functions
"""
# Set default parameters. These can be overriden by kwargs
# Default to a raised cosine basis
n_pts = 100 # Number of points at which to evaluate the basis
n_exp = prms['n_exp'] # Number of exponential basis functions
n_eye = prms['n_eye'] # Number of identity basis functions
n_bas = n_eye + n_exp
basis = np.zeros((n_pts,n_bas))
# The first n_eye basis elements are identity vectors in the first time bins
basis[:n_eye,:n_eye] = np.eye(n_eye)
# The remaining basis elements are exponential functions with logarithmically
# spaced time constants
taus = np.logspace(np.log10(1), np.log10(n_pts/3), n_exp)
# Basis function is a raised cosine centered at c with width w
basis_fn = lambda t,tau: np.exp(-t/tau)
for i in np.arange(n_exp):
basis[:,n_eye+i] = basis_fn(np.arange(n_pts),taus[i])
# Orthonormalize basis (this may decrease the number of effective basis vectors)
if prms['orth']:
basis = scipy.linalg.orth(basis)
if prms['norm']:
# We can only normalize nonnegative bases
if np.any(basis<0):
raise Exception("We can only normalize nonnegative impulse responses!")
# Normalize such that \int_0^1 b(t) dt = 1
basis = basis / np.tile(np.sum(basis,axis=0), [n_pts,1]) / (1.0/n_pts)
return basis
def create_gaussian_basis(prms):
"""
Create a basis of Gaussian bumps.
This is primarily for spatial filters.
"""
# Set default parameters. These can be overriden by kwargs
# Default to a raised cosine basis
n_gauss = prms['n_gauss'] # Tuple indicating number of Gaussian bumps along each dimension
n_dim = len(n_gauss)
n_eye = prms['n_eye'] # Number of identity basis functions
n_bas = n_eye + np.prod(n_gauss)
basis = np.zeros((n_bas,n_bas))
# The first n_eye basis elements are identity vectors in the first time bins
basis[:n_eye,:n_eye] = np.eye(n_eye)
# The remaining basis functions are Gaussian bumps at intervals of 1 in each dimension
sigma = 1
for g1 in np.arange(np.prod(n_gauss)):
mu = np.array(np.unravel_index(g1,n_gauss))
for g2 in np.arange(np.prod(n_gauss)):
x = np.array(np.unravel_index(g2,n_gauss))
basis[n_eye+g2,n_eye+g1] = np.exp(-0.5/(sigma**2)*np.sum((x-mu)**2))
# Basis function is a raised cosine centered at c with width w
#basis_fn = lambda t,mu,sig: np.exp(-0.5/(sig**2)*(t-mu)**2)
#for i in np.arange(n_gauss):
# basis[:,i] = basis_fn(np.arange(n_pts),mus[i],sigma)
# Orthonormalize basis (this may decrease the number of effective basis vectors)
if prms['orth']:
basis = scipy.linalg.orth(basis)
if prms['norm']:
# We can only normalize nonnegative bases
if np.any(basis<0):
raise Exception("We can only normalize nonnegative impulse responses!")
# Normalize such that \int_0^1 b(t) dt = 1
basis = basis / np.tile(np.sum(basis,axis=0), [basis.shape[0],1])
return basis
def create_identity_basis(prms):
"""
    Create an identity basis (each basis vector is a unit impulse).
    This is primarily for spatial filters.
"""
# Set default parameters. These can be overriden by kwargs
# Default to a raised cosine basis
n_eye = prms['n_eye'] # Number of identity basis functions
basis = np.eye(n_eye)
return basis
def convolve_with_basis(stim, basis):
""" Project stimulus onto a basis.
:param stim TxD matrix of inputs.
T is the number of time bins
D is the number of stimulus dimensions.
:param basis RxB basis matrix
R is the length of the impulse response
B is the number of bases
:rtype TxDxB tensor of stimuli convolved with bases
"""
(T,D) = stim.shape
(R,B) = basis.shape
import scipy.signal as sig
    # First, by convention, the impulse responses are applied to times
# (t-R:t-1). That means we need to prepend a row of zeros to make
# sure the basis remains causal
basis = np.vstack((np.zeros((1,B)),basis))
# Initialize array for filtered stimulus
fstim = np.empty((T,D,B))
# Compute convolutions
for b in np.arange(B):
assert np.all(np.isreal(stim))
assert np.all(np.isreal(basis[:,b]))
# fstim[:,:,b] = sig.convolve2d(stim,
# np.reshape(basis[:,b],[R+1,1]),
# 'full')[:T,:]
fstim[:,:,b] = sig.fftconvolve(stim,
np.reshape(basis[:,b],[R+1,1]),
'full')[:T,:]
return fstim
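# Illustrative usage sketch (not part of the original module): convolving a
# T=200, D=2 stimulus with a 4-function identity basis gives a (200, 2, 4)
# tensor, matching the shape convention in the docstring above.
if __name__ == "__main__":
    _ident_basis = create_identity_basis({'n_eye': 4})
    _demo_stim = np.random.randn(200, 2)
    print(convolve_with_basis(_demo_stim, _ident_basis).shape)  # (200, 2, 4)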
def convolve_with_low_rank_2d_basis(stim, basis_x, basis_t):
""" Convolution with a low-rank 2D basis can be performed
by first convolving with the spatial basis (basis_x)
and then convolving with the temporal basis (basis_t)
"""
(T,D) = stim.shape
(Rx,Bx) = basis_x.shape
(Rt,Bt) = basis_t.shape
# Rx is the spatial "width" of the tuning curve. This should
# be equal to the "width" of the stimulus.
assert Rx==D, "ERROR: Spatial basis must be the same size as the stimulus"
import scipy.signal as sig
# First convolve with each stimulus filter
# Since the spatial stimulus filters are the same width as the spatial
# stimulus, we can just take the dot product to get the valid portion
fstimx = np.dot(stim, basis_x)
# Now convolve with the temporal basis.
    # By convention, the impulse responses are applied to times
# (t-R:t-1). That means we need to prepend a row of zeros to make
# sure the basis remains causal
basis_t = np.vstack((np.zeros((1,Bt)),basis_t))
# Initialize array for the completely filtered stimulus
fstim = np.empty((T,Bx,Bt))
# Compute convolutions of the TxBx fstimx with each of the temporal bases
for b in np.arange(Bt):
fstim[:,:,b] = sig.fftconvolve(fstimx,
np.reshape(basis_t[:,b],[Rt+1,1]),
'full')[:T,:]
return fstim
_fft_cache = []
def convolve_with_2d_basis(stim, basis, shape=['first', 'valid']):
""" Project stimulus onto a basis.
:param stim TxD matrix of inputs.
T is the number of time bins
D is the number of stimulus dimensions.
:param basis TbxDb basis matrix
Tb is the length of the impulse response
Db is the number of basis dimensions.
:rtype Tx1 vector of stimuli convolved with the 2D basis
"""
(T,D) = stim.shape
(Tb,Db) = basis.shape
# assert D==Db, "Spatial dimension of basis must match spatial dimension of stimulus."
# import scipy.signal as sig
    # First, by convention, the impulse responses are applied to times
# (t-R:t-1). That means we need to prepend a row of zeros to make
# sure the basis remains causal
basis = np.vstack((np.zeros((1,Db)),basis))
# Flip the spatial dimension for convolution
# We are convolving the stimulus with the filter, so the temporal part does
# NOT need to be flipped
basis = basis[:,::-1]
# Compute convolution using FFT
if D==Db and shape[1] == 'valid':
raise Warning("Use low rank convolution when D==Db!")
# Look for fft_stim in _fft_cache
fft_stim = None
for (cache_stim, cache_fft_stim) in _fft_cache:
if np.allclose(stim[-128:],cache_stim[-128:]) and \
np.allclose(stim[:128],cache_stim[:128]):
fft_stim = cache_fft_stim
break
if not fft_stim is None:
fstim,_ = fftconv.fftconvolve(stim, basis, 'full',
fft_in1=fft_stim)
else:
fstim,fft_stim,_ = fftconv.fftconvolve(stim, basis, 'full')
_fft_cache.append((stim,fft_stim))
# Slice the result
assert len(shape) == 2
if shape[0] == 'first':
fstim = fstim[:T,:]
else:
raise Exception('Only supporting \'first\' slicing for dimension 0 (time)')
if shape[1] == 'valid':
assert Db == D, 'Dimension of basis must match that of stimuli for valid'
elif shape[1] == 'central':
sz = D + Db - 1
        start = (sz - D) // 2
stop = start + D
fstim = fstim[:,start:stop]
return fstim
def convolve_with_3d_basis(stim, basis, shape=['first', 'central', 'central']):
""" Project stimulus onto a basis.
:param stim T x Dx x Dy array of inputs.
T is the number of time bins
Dx is the stimulus x dimension.
Dy is the stimulus y dimension.
:param basis Tb x Dbx x Dby basis matrix
Tb is the length of the impulse response
Dbx is the basis x dimension
Dby is the basis y dimension
:rtype Tx1 vector of stimuli convolved with the 2D basis
"""
assert stim.ndim == basis.ndim == 3
(T,Dx,Dy) = stim.shape
(Tb,Dbx,Dby) = basis.shape
    # First, by convention, the impulse responses are applied to times
# (t-R:t-1). That means we need to prepend a row of zeros to make
# sure the basis remains causal
basis = np.concatenate((np.zeros((1,Dbx,Dby)),basis), axis=0)
# Flip the spatial dimension for convolution
# We are convolving the stimulus with the filter, so the temporal part does
# NOT need to be flipped
basis = basis[:,::-1, ::-1]
# Compute convolution using FFT
if Dx==Dbx and Dy==Dby and shape[1] == 'valid':
raise Warning("Use low rank convolution when D==Db!")
# Look for fft_stim in _fft_cache
fft_stim = None
for (cache_stim, cache_fft_stim) in _fft_cache:
        if np.allclose(stim[-128:], cache_stim[-128:]) and \
           np.allclose(stim[:128], cache_stim[:128]):
            fft_stim = cache_fft_stim
            break
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
def get_default_axes3d(xlim=[-1, 1], ylim=[-1, 1], zlim=[-1, 1]):
""" Create a default `mpl_toolkits.mplot3d.Axes3D` object with default
axis limits on all axis from -1 to 1, and labels on the axes.
"""
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.set_xlim3d(xlim)
ax.set_ylim3d(ylim)
ax.set_zlim3d(zlim)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
return fig, ax
def plot_reference_frame(ax, tf=None, arrow_length=0.2):
""" Plot xyz-axes on axes3d object
Parameters
----------
ax : mpl_toolkits.mplot3d.Axes3D
Axes object for 3D plotting.
tf : np.array of float
Transform to specify location of axes. Plots in origin if None.
    arrow_length : float
The length of the axes plotted.
"""
l = arrow_length
x_axis = np.array([[0, l], [0, 0], [0, 0]])
y_axis = np.array([[0, 0], [0, l], [0, 0]])
    z_axis = np.array([[0, 0], [0, 0], [0, l]])
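# The function body above is truncated here; the helper below is a separate,
# hypothetical sketch (not the original implementation) of how such a frame is
# typically drawn: transform the origin and axis tips by `tf`, then plot the
# x/y/z axes in red/green/blue.
def plot_reference_frame_sketch(ax, tf=None, arrow_length=0.2):
    l = arrow_length
    pts = np.array([[0, 0, 0], [l, 0, 0], [0, l, 0], [0, 0, l]], dtype=float)
    if tf is not None:
        pts = pts @ tf[:3, :3].T + tf[:3, 3]
    origin = pts[0]
    for tip, color in zip(pts[1:], ("r", "g", "b")):
        ax.plot([origin[0], tip[0]], [origin[1], tip[1]],
                [origin[2], tip[2]], color)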
from functools import partial
import re
import json
import random
import torch
import numpy as np
from scipy.optimize import minimize
from torch.nn import functional as F
from numpy import * # to override the math functions
from matplotlib import pyplot as plt
from tqdm import tqdm
import math
import warnings
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def top_k_logits(logits, k):
v, ix = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float('Inf')
return out
# @torch.no_grad()
# def sample_from_model(model, x, steps, points=None, variables=None, temperature=1.0, sample=False, top_k=None):
# """
# take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
# the sequence, feeding the predictions back into the model each time. Clearly the sampling
# has quadratic complexity unlike an RNN that is only linear, and has a finite context window
# of block_size, unlike an RNN that has an infinite context window.
# """
# block_size = model.get_block_size()
# model.eval()
# for k in range(steps):
# x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
# logits, _ = model(x_cond, points=points, variables=variables)
# # pluck the logits at the final step and scale by temperature
# logits = logits[:, -1, :] / temperature
# # optionally crop probabilities to only the top k options
# if top_k is not None:
# logits = top_k_logits(logits, top_k)
# # apply softmax to convert to probabilities
# probs = F.softmax(logits, dim=-1)
# # sample from the distribution or take the most likely
# if sample:
# ix = torch.multinomial(probs, num_samples=1)
# else:
# _, ix = torch.topk(probs, k=1, dim=-1)
# # append to the sequence and continue
# x = torch.cat((x, ix), dim=1)
# return x
#use nucleus sampling from https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
def top_k_top_p_filtering(logits, top_k=0.0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k >0: keep only top k tokens with highest probability (top-k filtering).
top_p >0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
"""
#TODO: support for batch size more than 1
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
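# Minimal illustration (not part of the original file): filter a random logit
# vector with top-k / nucleus filtering, then sample one token from what survives.
if __name__ == "__main__":
    _logits = torch.randn(50)
    _filtered = top_k_top_p_filtering(_logits, top_k=10, top_p=0.9)
    _probs = F.softmax(_filtered, dim=-1)
    print(int(torch.multinomial(_probs, num_samples=1)))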
@torch.no_grad()
def sample_from_model(model, x, steps, points=None, index=None, variables=None, temperature=1.0, sample=False, top_k=0.0, top_p=0.0, is_finished=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
for k in range(steps):
if is_finished is not None and is_finished(x):
break
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = model(x_cond, points=points, index=index, variables=variables)
# pluck the logits at the final step and scale by temperature
logits = logits[0, -1, :] / temperature
# optionally crop probabilities to only the top k options
# if top_k is not None:
# logits = top_k_logits(logits, top_k)
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix.unsqueeze(0)), dim=1)
return x
def plot_and_save_results(resultDict, fName, pconf, titleTemplate, textTest, modelKey='SymbolicGPT'):
if isinstance(resultDict, dict):
# plot the error frequency for model comparison
num_eqns = len(resultDict[fName][modelKey]['err'])
num_vars = pconf.numberofVars
title = titleTemplate.format(num_eqns, num_vars)
models = list(key for key in resultDict[fName].keys() if len(resultDict[fName][key]['err'])==num_eqns)
lists_of_error_scores = [resultDict[fName][key]['err'] for key in models if len(resultDict[fName][key]['err'])==num_eqns]
linestyles = ["-","dashdot","dotted","--"]
eps = 0.00001
y, x, _ = plt.hist([np.log([max(min(x+eps, 1e5),1e-5) for x in e]) for e in lists_of_error_scores],
label=models,
cumulative=True,
histtype="step",
bins=2000,
density=True,
log=False)
y = np.expand_dims(y,0)
plt.figure(figsize=(15, 10))
for idx, m in enumerate(models):
plt.plot(x[:-1],
y[idx] * 100,
linestyle=linestyles[idx],
label=m)
plt.legend(loc="upper left")
plt.title(title)
plt.xlabel("Log of Relative Mean Square Error")
plt.ylabel("Normalized Cumulative Frequency")
name = '{}.png'.format(fName.split('.txt')[0])
plt.savefig(name)
with open(fName, 'w', encoding="utf-8") as o:
for i in range(num_eqns):
err = resultDict[fName][modelKey]['err'][i]
eq = resultDict[fName][modelKey]['trg'][i]
predicted = resultDict[fName][modelKey]['prd'][i]
print('Test Case {}.'.format(i))
print('Target:{}\nSkeleton:{}'.format(eq, predicted))
print('Err:{}'.format(err))
print('') # just an empty line
o.write('Test Case {}/{}.\n'.format(i,len(textTest)-1))
o.write('{}\n'.format(eq))
o.write('{}:\n'.format(modelKey))
o.write('{}\n'.format(predicted))
o.write('{}\n{}\n\n'.format(
predicted,
err
))
print('Avg Err:{}'.format(np.mean(resultDict[fName][modelKey]['err'])))
def tokenize_predict_and_evaluate(i, inputs, points, outputs, variables,
train_dataset, textTest, trainer, model, resultDict,
numTests, variableEmbedding, blockSize, fName,
modelKey='SymbolicGPT', device='cpu'):
eq = ''.join([train_dataset.itos[int(i)] for i in outputs[0]])
eq = eq.strip(train_dataset.paddingToken).split('>')
eq = eq[0] #if len(eq[0])>=1 else eq[1]
eq = eq.strip('<').strip(">")
print(eq)
if variableEmbedding == 'STR_VAR':
eq = eq.split(':')[-1]
t = json.loads(textTest[i])
inputs = inputs[:,0:1].to(device)
points = points.to(device)
# points = points[:,:numPoints] # filter anything more than maximum number of points
variables = variables.to(device)
bestErr = 10000000
bestPredicted = 'C'
for i in range(numTests):
predicted, err = generate_sample_and_evaluate(
model, t, eq, inputs,
blockSize, points, variables,
train_dataset, variableEmbedding)
if err < bestErr:
bestErr = err
bestPredicted = predicted
resultDict[fName][modelKey]['err'].append(bestErr)
resultDict[fName][modelKey]['trg'].append(eq)
resultDict[fName][modelKey]['prd'].append(bestPredicted)
return eq, bestPredicted, bestErr
def generate_sample_and_evaluate(model, t, eq, inputs,
blockSize, points, variables,
train_dataset, variableEmbedding):
outputsHat = sample_from_model(model,
inputs,
blockSize,
points=points,
variables=variables,
temperature=0.9,
sample=True,
top_k=40,
top_p=0.7,
)[0]
# filter out predicted
predicted = ''.join([train_dataset.itos[int(i)] for i in outputsHat])
if variableEmbedding == 'STR_VAR':
predicted = predicted.split(':')[-1]
predicted = predicted.strip(train_dataset.paddingToken).split('>')
predicted = predicted[0] #if len(predicted[0])>=1 else predicted[1]
predicted = predicted.strip('<').strip(">")
predicted = predicted.replace('Ce','C*e')
# train a regressor to find the constants (too slow)
c = [1.0 for i,x in enumerate(predicted) if x=='C'] # initialize coefficients as 1
# c[-1] = 0 # initialize the constant as zero
b = [(-2,2) for i,x in enumerate(predicted) if x=='C'] # bounds on variables
try:
if len(c) != 0:
# This is the bottleneck in our algorithm
# for easier comparison, we are using minimize package
cHat = minimize(lossFunc, c, #bounds=b,
args=(predicted, t['X'], t['Y']))
predicted = predicted.replace('C','{}').format(*cHat.x)
    except ValueError:
        raise ValueError('Err: Wrong Equation {}'.format(predicted))
    except Exception as e:
        raise RuntimeError('Err: Wrong Equation {}, Err: {}'.format(predicted, e))
Ys = [] #t['YT']
Yhats = []
for xs in t['XT']:
try:
eqTmp = eq + '' # copy eq
eqTmp = eqTmp.replace(' ','')
eqTmp = eqTmp.replace('\n','')
for i,x in enumerate(xs):
# replace xi with the value in the eq
eqTmp = eqTmp.replace('x{}'.format(i+1), str(x))
if ',' in eqTmp:
assert 'There is a , in the equation!'
YEval = eval(eqTmp)
# YEval = 0 if np.isnan(YEval) else YEval
# YEval = 100 if np.isinf(YEval) else YEval
except:
print('TA: For some reason, we used the default value. Eq:{}'.format(eqTmp))
print(i)
raise
continue # if there is any point in the target equation that has any problem, ignore it
YEval = 100 #TODO: Maybe I have to punish the model for each wrong template not for each point
Ys.append(YEval)
try:
eqTmp = predicted + '' # copy eq
eqTmp = eqTmp.replace(' ','')
eqTmp = eqTmp.replace('\n','')
for i,x in enumerate(xs):
# replace xi with the value in the eq
eqTmp = eqTmp.replace('x{}'.format(i+1), str(x))
if ',' in eqTmp:
assert 'There is a , in the equation!'
Yhat = eval(eqTmp)
# Yhat = 0 if np.isnan(Yhat) else Yhat
# Yhat = 100 if np.isinf(Yhat) else Yhat
except:
print('PR: For some reason, we used the default value. Eq:{}'.format(eqTmp))
Yhat = 100
Yhats.append(Yhat)
err = relativeErr(Ys,Yhats, info=True)
print('\nTarget:{}'.format(eq))
print('Skeleton+LS:{}'.format(predicted))
print('Err:{}'.format(err))
print('-'*10)
    if isinstance(err, (complex, np.complex128)):
err = abs(err.real)
return predicted, err
# helper class and functions
# add a safe wrapper for numpy math functions
def divide(x, y):
    x = np.nan_to_num(x)
"""
Find dives
"""
import numpy as np
import pandas as pd
import scipy as sp
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
def plotDives(calc_file_path, new_file_path, is_export, min_length = 60, required_depth = None, max_depth = None, interest_variables = [], shading = 'deep'):
"""
This function pulls individual dives from the data that meet defined criteria.
It then plots these dives next to each other starting from a shared zeroed start time.
The dives can be colorscaled based on "interest variables"
Inputs:
min_length : type int or float, sets the minimum length of a dive in seconds before a dive is recorded (Default is 60 seconds)
        required_depth : type int or float, a dive must reach this depth (same units as file) in order to be recorded (Default is None)
        max_depth : type int or float, dives that reach a depth greater than this will not be recorded (Default is None)
        interest_variables : type list of string, each string is the name of a variable to colorscale dives, creates a subplot for each
shading : type string, choose any PlotLy colorscale to set the color (Default is 'deep')
Tips:
For cyclical data like Pitch, Roll, or Heading try setting 'shading' to 'icefire' one of PlotLy's cyclical colorscales
Though not technically cyclical, 'balance' provides a similar effect
"""
# Pull in Data
data = pd.read_csv(calc_file_path)
# Pull Specific Variables :
fs = data['fs'].tolist()[0]
depth = np.array(data["Depth"])
# Calculate time in terms of hours
numData = len(depth)
t = np.array([x/fs/3600 for x in range(numData)])
# Deifne the surface
    sigma = np.std(depth[0:fs*2])
import h5py
import numpy as np
"""File that calculates certain correlation functions from a given dataset and saves to a new dataset."""
# Import data:
filename = input('Enter filename: ')
data_path = '../../scratch/data/spin-1/kibble-zurek/{}.hdf5'.format(filename)
data_file = h5py.File(data_path, 'r')
# Loading grid array data:
x, y = data_file['grid/x'], data_file['grid/y']
X, Y = np.meshgrid(x[:], y[:], indexing='ij')
Nx, Ny = x[:].size, y[:].size
dx, dy = x[1] - x[0], y[1] - y[0]
dkx, dky = np.pi / (Nx / 2 * dx), np.pi / (Ny / 2 * dy)
kxx = np.arange(-Nx // 2, Nx // 2)
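# The file is truncated at this point; a typical way to finish building the
# 2-D k-space grid from the spacings above (an assumption, not the original
# code) would be:
kyy = np.arange(-Ny // 2, Ny // 2) * dky
Kx, Ky = np.meshgrid(kxx * dkx, kyy, indexing='ij')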
import pickle
import os, re, sys
import shutil
import nibabel as nib
from scipy.fftpack import fftn, ifftn
import numpy as np
try:
import matplotlib
# matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib import animation
except:
print ('matplotlib not imported')
def progress_bar(curr_idx, max_idx, time_step, repeat_elem = "_"):
max_equals = 55
step_ms = int(time_step*1000)
num_equals = int(curr_idx*max_equals/float(max_idx))
len_reverse =len('Step:%d ms| %d/%d ['%(step_ms, curr_idx, max_idx)) + num_equals
sys.stdout.write("Step:%d ms|%d/%d [%s]" %(step_ms, curr_idx, max_idx, " " * max_equals,))
sys.stdout.flush()
sys.stdout.write("\b" * (max_equals+1))
sys.stdout.write(repeat_elem * num_equals)
sys.stdout.write("\b"*len_reverse)
sys.stdout.flush()
if curr_idx == max_idx:
print('\n')
def read_fft_volume(data4D, harmonic=1):
zslices = data4D.shape[2]
tframes = data4D.shape[3]
data3d_fft = np.empty((data4D.shape[:2]+(0,)))
for slice in range(zslices):
ff1 = fftn([data4D[:,:,slice, t] for t in range(tframes)])
fh = np.absolute(ifftn(ff1[harmonic, :, :]))
fh[fh < 0.1 * np.max(fh)] = 0.0
image = 1. * fh / np.max(fh)
# plt.imshow(image, cmap = 'gray')
# plt.show()
image = np.expand_dims(image, axis=2)
data3d_fft = np.append(data3d_fft, image, axis=2)
return data3d_fft
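# Small self-contained check (illustrative, not part of the original utilities):
# a synthetic 4D volume that oscillates once per cycle inside a square region
# should produce a first-harmonic image highlighting that region.
if __name__ == "__main__":
    _t = np.linspace(0, 2 * np.pi, 30, endpoint=False)
    _vol = np.zeros((16, 16, 3, 30))
    _vol[4:12, 4:12, :, :] = np.sin(_t)[None, None, None, :]
    print(read_fft_volume(_vol, harmonic=1).shape)  # (16, 16, 3)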
def save_data(data, filename, out_path):
out_filename = os.path.join(out_path, filename)
with open(out_filename, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
print ('saved to %s' % out_filename)
def load_pkl(path):
with open(path) as f:
obj = pickle.load(f)
return obj
def imshow(*args,**kwargs):
""" Handy function to show multiple plots in on row, possibly with different cmaps and titles
Usage:
imshow(img1, title="myPlot")
imshow(img1,img2, title=['title1','title2'])
imshow(img1,img2, cmap='hot')
imshow(img1,img2,cmap=['gray','Blues']) """
cmap = kwargs.get('cmap', 'gray')
title= kwargs.get('title','')
axis_off = kwargs.get('axis_off','')
if len(args)==0:
raise ValueError("No images given to imshow")
elif len(args)==1:
plt.title(title)
plt.imshow(args[0], interpolation='none')
else:
n=len(args)
if type(cmap)==str:
cmap = [cmap]*n
if type(title)==str:
title= [title]*n
plt.figure(figsize=(n*5,10))
for i in range(n):
plt.subplot(1,n,i+1)
plt.title(title[i])
plt.imshow(args[i], cmap[i])
if axis_off:
plt.axis('off')
plt.show()
def plot_roi(data4D, roi_center, roi_radii):
"""
Do the animation of full heart volume
"""
x_roi_center, y_roi_center = roi_center[0], roi_center[1]
x_roi_radius, y_roi_radius = roi_radii[0], roi_radii[1]
print ('nslices', data4D.shape[2])
zslices = data4D.shape[2]
tframes = data4D.shape[3]
slice_cnt = 0
for slice in [data4D[:,:,z,:] for z in range(zslices)]:
outdata = np.swapaxes(np.swapaxes(slice[:,:,:], 0,2), 1,2)
roi_mask = np.zeros_like(outdata[0])
roi_mask[x_roi_center - x_roi_radius:x_roi_center + x_roi_radius,
y_roi_center - y_roi_radius:y_roi_center + y_roi_radius] = 1
outdata[:, roi_mask > 0.5] = 0.8 * outdata[:, roi_mask > 0.5]
outdata[:, roi_mask > 0.5] = 0.8 * outdata[:, roi_mask > 0.5]
fig = plt.figure(1)
fig.canvas.set_window_title('slice_No' + str(slice_cnt))
slice_cnt+=1
def init_out():
im.set_data(outdata[0])
def animate_out(i):
im.set_data(outdata[i])
return im
im = fig.gca().imshow(outdata[0], cmap='gray')
anim = animation.FuncAnimation(fig, animate_out, init_func=init_out, frames=tframes, interval=50)
plt.show()
def plot_4D(data4D):
"""
Do the animation of full heart volume
"""
print ('nslices', data4D.shape[2])
zslices = data4D.shape[2]
tframes = data4D.shape[3]
slice_cnt = 0
for slice in [data4D[:,:,z,:] for z in range(zslices)]:
outdata = np.swapaxes(np.swapaxes(slice[:,:,:], 0,2), 1,2)
fig = plt.figure(1)
fig.canvas.set_window_title('slice_No' + str(slice_cnt))
slice_cnt+=1
def init_out():
im.set_data(outdata[0])
def animate_out(i):
im.set_data(outdata[i])
return im
im = fig.gca().imshow(outdata[0], cmap='gray')
anim = animation.FuncAnimation(fig, animate_out, init_func=init_out, frames=tframes, interval=50)
plt.show()
def multilabel_split(image_tensor):
"""
image_tensor : Batch * H * W
Split multilabel images and return stack of images
Returns: Tensor of shape: Batch * H * W * n_class (4D tensor)
    # TODO: Be careful when using this code: labels need to be
    defined explicitly beforehand, as this code does not handle
    missing labels
So far, this function is okay as it considers full volume for
finding out unique labels
"""
labels = np.unique(image_tensor)
batch_size = image_tensor.shape[0]
out_shape = image_tensor.shape + (len(labels),)
image_tensor_4D = np.zeros(out_shape, dtype='uint8')
    for i in range(batch_size):
cnt = 0
shape =image_tensor.shape[1:3] + (len(labels),)
temp = np.ones(shape, dtype='uint8')
for label in labels:
temp[...,cnt] = np.where(image_tensor[i] == label, temp[...,cnt], 0)
cnt += 1
image_tensor_4D[i] = temp
return image_tensor_4D
def swapaxes_slv(vol):
    return np.swapaxes(np.swapaxes(vol, 0, 2), 1, 2)
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from datetime import timedelta as td
from itertools import product
import numpy as np
import pandas as pd
from floris.utilities import wrap_360
from . import utilities as fsut
def df_movingaverage(
df_in,
cols_angular,
window_width=td(seconds=60),
min_periods=1,
center=True,
calc_median_min_max_std=False,
return_index_mapping=False,
):
# Copy and ensure dataframe is indexed by time
df = df_in.copy()
if "time" in df.columns:
df = df.set_index("time")
# Find non-angular columns
if isinstance(cols_angular, bool):
if cols_angular:
cols_angular = [c for c in df.columns]
else:
cols_angular = []
cols_regular = [c for c in df.columns if c not in cols_angular]
# Now calculate cos and sin components for angular columns
sin_cols = ["{:s}_sin".format(c) for c in cols_angular]
cos_cols = ["{:s}_cos".format(c) for c in cols_angular]
df[sin_cols] = np.sin(df[cols_angular] * np.pi / 180.0)
df[cos_cols] = np.cos(df[cols_angular] * np.pi / 180.0)
# Drop angular columns
df = df.drop(columns=cols_angular)
# Now calculate rolling (moving) average
df_roll = df.rolling(
window_width,
center=center,
axis=0,
min_periods=min_periods
)
# First calculate mean values of non-angular columns
df_ma = df_roll[cols_regular].mean().copy()
# Now add mean values of angular columns
df_ma[cols_angular] = wrap_360(
np.arctan2(
df_roll[sin_cols].mean().values,
df_roll[cos_cols].mean().values
) * 180.0 / np.pi
)
# Figure out which indices/data points belong to each window
if (return_index_mapping or calc_median_min_max_std):
df_tmp = df_ma[[]].copy().reset_index(drop=False)
df_tmp["tmp"] = 1
df_tmp = df_tmp.rolling(window_width, center=center, axis=0, on="time")["tmp"]
# Grab index of first and last time entry for each window
windows_min = list(df_tmp.apply(lambda x: x.index[0]).astype(int))
windows_max = list(df_tmp.apply(lambda x: x.index[-1]).astype(int))
# Now create a large array that contains the array of indices, with
# the values in each row corresponding to the indices upon which that
# row's moving/rolling average is based. Note that we purposely create
# a larger matrix than necessary, since some rows/windows rely on more
# data (indices) than others. This is the case e.g., at the start of
# the dataset, at the end, and when there are gaps in the data. We fill
# the remaining matrix entries with "-1".
dn = int(np.ceil(window_width/fsut.estimate_dt(df_in["time"]))) + 5
data_indices = -1 * np.ones((df_ma.shape[0], dn), dtype=int)
for ii in range(len(windows_min)):
lb = windows_min[ii]
ub = windows_max[ii]
ind = np.arange(lb, ub + 1, dtype=int)
data_indices[ii, ind - lb] = ind
# Calculate median, min, max, std if necessary
if calc_median_min_max_std:
# Append all current columns with "_mean"
df_ma.columns = ["{:s}_mean".format(c) for c in df_ma.columns]
# Add statistics for regular columns
funs = ["median", "min", "max", "std"]
cols_reg_stats = ["_".join(i) for i in product(cols_regular, funs)]
df_ma[cols_reg_stats] = df_roll[cols_regular].agg(funs).copy()
# Add statistics for angular columns
# Firstly, create matrix with indices for the mean values
data_indices_mean = np.tile(np.arange(0, df_ma.shape[0]), (dn, 1)).T
# Grab raw and mean data and format as numpy arrays
D = df_in[cols_angular].values
M = df_ma[["{:s}_mean".format(c) for c in cols_angular]].values
# Add NaN row as last row. This corresponds to the -1 indices
# that we use as placeholders. This way, those indices do not
# count towards the final statistics (median, min, max, std).
D = np.vstack([D, np.nan * np.ones(D.shape[1])])
M = np.vstack([M, np.nan * np.ones(M.shape[1])])
# Now create a 3D matrix containing all values. The three dimensions
# come from:
# > [0] one dimension containing the rolling windows,
# > [1] one with the raw data underlying each rolling window,
# > [2] one for each angular column within the dataset
values = D[data_indices, :]
values_mean = M[data_indices_mean, :]
# Center values around values_mean
values[values > (values_mean + 180.0)] += -360.0
values[values < (values_mean - 180.0)] += 360.0
# Calculate statistical properties and wrap to [0, 360)
values_median = wrap_360(np.nanmedian(values, axis=1))
values_min = wrap_360(np.nanmin(values, axis=1))
        values_max = wrap_360(np.nanmax(values, axis=1))
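        # Note on the angular statistics above (illustration, not code from this
        # module): averaging angles through their sine/cosine components avoids
        # wrap-around artifacts. For example, the naive mean of 350 and 10 degrees
        # is 180, while
        #     wrap_360(degrees(arctan2(mean(sin), mean(cos)))) = 0,
        # which is why the statistics here are computed on values re-centered
        # around the circular mean.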
"""
Utilities functions
"""
import numpy as np
import pandas as pd
import logging
import scipy
import scipy.linalg.lapack
from copy import deepcopy
logger = logging.getLogger(name=__name__)
# Random Categorical
def random_categorical(pvals, size=None):
out = np.random.multinomial(n=1, pvals=pvals, size=size).dot(
np.arange(len(pvals)))
if size is None:
return int(out)
else:
return np.array(out, dtype=int)
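# Quick illustrative check (not part of the original module): with a large
# sample, the empirical frequencies should track pvals.
if __name__ == "__main__":
    _draws = random_categorical(pvals=[0.2, 0.5, 0.3], size=10000)
    print(np.bincount(_draws, minlength=3) / 10000.0)  # roughly [0.2, 0.5, 0.3]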
# Fixed Wishart
def array_wishart_rvs(df, scale, **kwargs):
""" Wrapper around scipy.stats.wishart to always return a np.array """
if np.size(scale) == 1:
return np.array([[
scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
]])
else:
return scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
def array_invwishart_rvs(df, scale, **kwargs):
""" Wrapper around scipy.stats.invwishart to always return a np.array """
if np.size(scale) == 1:
return np.array([[
scipy.stats.invwishart(df=df, scale=scale, **kwargs).rvs()
]])
else:
return scipy.stats.invwishart(df=df, scale=scale, **kwargs).rvs()
def array_invgamma_rvs(shape, scale, **kwargs):
""" Wrapper around scipy.stats.wishart to always return a np.array """
if np.size(scale) == 1:
return np.array([
scipy.stats.invgamma(a=shape, scale=scale, **kwargs).rvs()
])
else:
return scipy.stats.invgamma(a=shape, scale=scale, **kwargs).rvs()
# Matrix Normal LogPDF
def matrix_normal_logpdf(X, mean, Lrowprec, Lcolprec):
""" Numerical stable matrix normal logpdf
(when cholesky of precision matrices are known)
Args:
X (n by m ndarray): random variable instance
mean (n by m ndarray): mean
Lrowprec (n by n ndarray): chol of pos def row covariance (i.e. U^{-1})
Lcolprec (m by m ndarray): chol of pos def col covariance (i.e. V^{-1})
Returns:
        logpdf = (-1/2*tr(V^{-1}(X-M)^T U^{-1}(X-M)) - nm/2*log(2pi) +
                  m/2*log|U^{-1}| + n/2*log|V^{-1}|)
"""
    n, m = np.shape(X)
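# The original implementation is cut off above; the function below is a
# separate, self-contained sketch that evaluates the docstring formula directly
# (an illustration, not the truncated original code). It assumes Lrowprec and
# Lcolprec are lower-triangular Cholesky factors of the row and column
# precision matrices with positive diagonals.
def matrix_normal_logpdf_sketch(X, mean, Lrowprec, Lcolprec):
    n, m = np.shape(X)
    D = X - mean
    A = Lrowprec.T @ D @ Lcolprec  # tr(V^{-1} D^T U^{-1} D) == ||A||_F^2
    logdet_rowprec = 2.0 * np.sum(np.log(np.diag(Lrowprec)))  # log|U^{-1}|
    logdet_colprec = 2.0 * np.sum(np.log(np.diag(Lcolprec)))  # log|V^{-1}|
    return (-0.5 * np.sum(A ** 2)
            - 0.5 * n * m * np.log(2.0 * np.pi)
            + 0.5 * m * logdet_rowprec
            + 0.5 * n * logdet_colprec)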
# This source code is part of the Gecos package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__author__ = "<NAME>, <NAME>"
__all__ = ["ColorOptimizer", "ScoreFunction", "DefaultScoreFunction"]
from collections import namedtuple
import abc
import copy
import skimage
import numpy as np
import numpy.random as random
import biotite.sequence as seq
import biotite.sequence.align as align
from .colors import lab_to_rgb
MIN_L = 0
MAX_L = 99
MIN_AB = -128
MAX_AB = 127
class ColorOptimizer(object):
"""
Create an optimizer that tries to find an optimal color conformation
within a given color space based on a score function.
The optimizer tries to minimize the return value of the score
function by adjusting the *Lab* values (coordinates) for each
symbol in a given alphabet.
The optimizer uses the random number generator from *NumPy*.
Therefore, call :func:`numpy.random.seed()` to set the seed for the
optimizer
Parameters
----------
alphabet : biotite.sequence.Alphabet
The alphabet to calculate the color conformation for.
score_function : ScoreFunction or callable
The score function which should be minimized.
When calling the object, its only parameter must be an array of
coordinates with shape *(n, 3)*, where *n* is the length of the
alphabet.
Its return value must be a single float - the score.
space : ColorSpace
The color space that defines the allowed space for the
coordinates.
constraints : ndarray, shape=(n,3), dtype=float, optional
An array whose non-NaN values are interpreted as constraints.
Constrained values will be fixed during the optimization.
"""
class Result(namedtuple("Result", ["alphabet", "trajectory", "scores"])):
"""
The result of an optimization.
Contains the final color scheme information as well as the
course of the coordinates and the score during the optimization.
Parameters
----------
alphabet : biotite.sequence.Alphabet
The alphabet the optimizer used.
trajectory : ndarray, shape=(m,n,3), dtype=float
The course of the coordinates during the simulation.
scores : ndarray, shape=(m,), dtype=float
The course of the score during the simulation.
Attributes
----------
alphabet : biotite.sequence.Alphabet
The alphabet the optimizer used.
trajectory : ndarray, shape=(m,n,3), dtype=float
The course of coordinates during the simulation.
lab_colors : ndarray, shape=(n,3), dtype=float
The final *Lab* color conformation, i.e. the last element of
`trajectory`.
rgb_colors : ndarray, shape=(n,3), dtype=float
The final color conformation converted into *RGB* colors.
scores : ndarray, shape=(m,), dtype=float
The course of the score during the simulation.
score : float
The final score, i.e. the last element of `scores`.
"""
@property
def score(self):
return self.scores[-1]
@property
def lab_colors(self):
return copy.deepcopy(self.trajectory[-1])
@property
def rgb_colors(self):
return lab_to_rgb(self.lab_colors.astype(int))
def __init__(self, alphabet, score_function, space, constraints=None):
self._alphabet = alphabet
self._n_symbols = len(alphabet)
self._score_func = score_function
self._space = space.space.copy()
self._coord = None
self._trajectory = []
self._scores = []
if constraints is None:
self._constraints = np.full((self._n_symbols, 3), np.nan)
else:
for constraint in constraints:
if not np.isnan(constraint).any() and \
not self._is_allowed(constraint):
raise ValueError(
f"Constraint {constraint} is outside the allowed space"
)
self._constraints = constraints.copy()
### Set initial conformation ###
# Every symbol has the 'l', 'a' and 'b' coordinates
# The coordinates are initially filled with values
# that are guaranteed to be invalid (l cannot be -1)
start_coord = np.full((self._n_symbols, 3), -1, dtype=float)
# Chose start position from allowed positions at random
for i in range(start_coord.shape[0]):
while not self._is_allowed(start_coord[i]):
drawn_coord = random.rand(3)
                drawn_coord[..., 0] = drawn_coord[..., 0] * (MAX_L - MIN_L) + MIN_L
                drawn_coord[..., 1:] = drawn_coord[..., 1:] * (MAX_AB - MIN_AB) + MIN_AB
start_coord[i] = drawn_coord
self._apply_constraints(start_coord)
self._set_coordinates(start_coord)
def set_coordinates(self, coord):
"""
Set the the coordinates of the current color conformation.
Potential color constraints are applied on these.
This coordinate changes will be tracked in the trajectory.
Parameters
----------
coord : ndarray, shape=(n,3), dtype=float
The new coordinates.
"""
if coord.shape != (self._n_symbols, 3):
raise ValueError(
f"Given shape is {coord.shape}, "
f"but expected shape is {(len(self._alphabet), 3)}"
)
for c in coord:
if not self._is_allowed(c):
raise ValueError(
f"Coordinates {c} are outside the allowed space"
)
coord = coord.copy()
self._apply_constraints(coord)
self._set_coordinates(coord)
def _set_coordinates(self, coord, score=None):
self._coord = coord
self._trajectory.append(coord)
if score is None:
score = self._score_func(coord)
self._scores.append(score)
def optimize(self, n_steps,
beta_start, rate_beta, stepsize_start, stepsize_end):
r"""
Perform a Simulated Annealing optimization on the current
coordinate to minimize the score returned by the score function.
This is basically a Monte-Carlo optimization where the
        temperature is varied according to a so-called annealing
        schedule over the course of the optimization.
        The algorithm is a heuristic that is motivated by the physical
        process of annealing.
        If we, e.g., cool steel, then a slow cooling can yield a superior
        quality, whereas with a fast cooling the steel can become
brittle.
The same happens here within the search space for the given
minimization task.
Parameters
----------
n_steps : int
The number of Simulated-Annealing steps.
beta_start : float
The inverse start temperature, where the start temperature
would be :math:`T_{start} = 1/(k_b \cdot \beta_{start})` with
:math:`k_b` being the boltzmann constant.
rate_beta: float
            The rate controls how fast the inverse temperature is
increased within the annealing schedule.
Here the exponential schedule is chosen so we have
:math:`\beta (t) = \beta_0 \cdot \exp(rate \cdot t)`.
stepsize_start : float
The radius in which the coordinates are randomly altered at
            the beginning of the simulated annealing algorithm.
Like the inverse temperature the step size follows an
exponential schedule, enabling the algorithm
            to do large perturbations at the beginning of the algorithm
run and increasingly smaller ones afterwards.
stepsize_end : float
The radius in which the coordinates are randomly altered at
the end of the simulated annealing algorithm run.
"""
# Calculate the max value 'i' can reach so that
# 'np.exp(rate_beta*i)' does not overflow
max_i = np.log(np.finfo(np.float64).max) / rate_beta
beta = lambda i: beta_start*np.exp(rate_beta*i) \
if i < max_i else np.inf
# Choose rate so that stepsize_end reached after n_steps
# derived from step_size(N_steps) = steps_end
if stepsize_start == stepsize_end:
rate_stepsize = 0
else:
            rate_stepsize = np.log(stepsize_end / stepsize_start) / n_steps
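# Stand-alone sketch (not part of the class) of the two exponential schedules
# described in the docstring above: the inverse temperature grows as
# beta(i) = beta_start * exp(rate_beta * i), while the step size decays from
# stepsize_start to stepsize_end over n_steps.
if __name__ == "__main__":
    _n_steps, _beta_start, _rate_beta = 100, 1.0, 0.05
    _stepsize_start, _stepsize_end = 10.0, 0.2
    _rate_step = np.log(_stepsize_end / _stepsize_start) / _n_steps
    for _i in (0, 50, 99):
        print(_i,
              _beta_start * np.exp(_rate_beta * _i),
              _stepsize_start * np.exp(_rate_step * _i))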
from __future__ import absolute_import
from __future__ import print_function
import os
import glob
import numpy as np
from scipy.interpolate import UnivariateSpline
from .core import file_finder, load_probe, load_fs, load_clusters, load_spikes
from .core import find_info, find_kwd, find_kwik, find_kwx
import h5py as h5
import json
from six.moves import range
@file_finder
def find_mean_waveforms(block_path, cluster, cluster_store=0, clustering='main'):
'''
Returns the mean waveform file for a given cluster found in the block path
Parameters
------
block_path : str
path to the block
cluster : int
the cluster identifier
cluster_store : int
the cluster store identifier
clustering : str, optional
ID of clustering
Returns
------
mean_waveforms_file : full path name to mean_waveforms file
'''
return os.path.join(block_path,
'*.phy',
'cluster_store',
str(cluster_store),
clustering,
'{}.mean_waveforms'.format(cluster)
)
@file_finder
def find_mean_masks(block_path, cluster, cluster_store=0, clustering='main'):
'''
Returns the mean masks file for a given cluster found in the block path
Parameters
------
block_path : str
path to the block
cluster : int
the cluster identifier
cluster_store : int
the cluster store identifier
clustering : str, optional
ID of clustering
Returns
------
mean_masks_file : full path name to mean_waveforms file
'''
return os.path.join(block_path,
'*.phy',
'cluster_store',
str(cluster_store),
clustering,
'{}.mean_masks'.format(cluster)
)
def mean_masks_w(block_path, cluster):
'''
Weights are equivalent to the mean_mask values for the channel.
Parameters
------
block_path : str
the path to the block
cluster : int
the cluster identifier
Returns
------
w : weight vector
'''
mean_masks = find_mean_masks(block_path, cluster)
mean_masks_arr = np.fromfile(mean_masks, dtype=np.float32)
return mean_masks_arr
def max_masks_w(block_path, cluster):
'''
Places all weight on the channel(s) which have the largest mean mask values.
If more than one channel have a mean_mask value equal to the max, these
channels will be weighted equally.
Parameters
------
block_path : str
the path to the block
cluster : int
the cluster identifier
Returns
------
w : weight vector
'''
w = mean_masks_w(block_path, cluster)
return w == w.max()
def get_cluster_coords(block_path, cluster, weight_func=None):
'''
Returns the location of a given cluster on the probe in x,y coordinates
in whatever units and reference the probe file uses.
Parameters
------
block_path : str
the path to the block
cluster : int
the cluster identifier
weight_func : function
function which takes `block_path` and `cluster` as args and returns a weight
vector for the coordinates. default: max_masks_w
Returns
------
xy : numpy array of coordinates
'''
if weight_func is None:
weight_func = max_masks_w
w = weight_func(block_path, cluster)
prb_info = load_probe(block_path)
channels = prb_info.channel_groups[0]['channels']
geometry = prb_info.channel_groups[0]['geometry']
coords = np.array([geometry[ch] for ch in channels])
return np.dot(w, coords) / w.sum()
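# Illustrative usage (hypothetical block path and cluster id, not from the
# original module): estimate a unit's position with the default max-mask
# weighting, or pass mean_masks_w for a soft, mask-weighted centroid.
# xy_hard = get_cluster_coords('/data/B999/block_1', 42)
# xy_soft = get_cluster_coords('/data/B999/block_1', 42, weight_func=mean_masks_w)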
# spike shapes
def upsample_spike(spike_shape, fs, new_fs=1000000.0):
'''
upsamples a spike shape to prepare it for computing the spike width
Parameters
------
spike_shape : numpy array
the spike shape
fs : float
the sampling rate of the spike shape
new_fs : float
sampling rate to upsample to (default=1000000.0 Hz, i.e. 1 MHz)
Returns
------
time : numpy array
array of sample times in seconds
new_spike_shape :
upsampled spike shape
'''
t_max = spike_shape.shape[0] / fs
t = np.arange(0, t_max, 1 / fs)[:spike_shape.shape[0]]
spl = UnivariateSpline(t, spike_shape)
ts = np.arange(0, t_max, 1 / new_fs)
return ts, spl(ts)
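# Self-contained sanity check (synthetic waveform, not real data; illustrative
# only): a negative Gaussian "trough" sampled at 30 kHz, upsampled onto the
# default 1 MHz grid.
# fs = 30000.0
# t0 = np.arange(60) / fs
# fake_spike = -np.exp(-((t0 - 0.001) ** 2) / (2 * 0.0002 ** 2))
# ts, shape = upsample_spike(fake_spike, fs)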
def get_troughpeak(time, spike_shape):
'''
Grabs the time of the trough and peak
Parameters
------
time : numpy array
time series of spike data
spike_shape : numpy array
the spike shape
Returns
------
trough_time : float
time of trough in seconds
peak_time : float
time of peak in seconds
'''
trough_i = spike_shape.argmin()
peak_i = spike_shape[trough_i:].argmax() + trough_i
return time[trough_i], time[peak_i]
def get_width_half_height(time,spike_shape):
'''
Grabs the time between the zero crossings around the trough after normalizing
to the trough depth and offsetting by 0.5 (i.e. the width at half height)
Parameters
------
time : numpy array
spike_shape : numpy array (should be up-sampled to at least 100000 Hz before calculating)
Returns
------
width_half_height : float
time between crossings in seconds
'''
width = np.nan
trough,peak = get_troughpeak(time,spike_shape)
ind = np.where(time == trough)
troughind = ind[0][0]
spike_shape /= -spike_shape.min()
spike_shape = spike_shape + 0.5
zero_crossings = np.where(np.diff(np.sign(spike_shape)))[0]
i = zero_crossings < troughind
j = zero_crossings > troughind
if (True in i) & (True in j):
# zero crossings before trough
i = (np.where(i))
pre_ind = zero_crossings[max(i[0])]
# zero crossings after trough
j = (np.where(j))
post_ind = zero_crossings[min(j[0])]
width = time[post_ind] - time[pre_ind]
return width
def get_width(block_path, cluster, new_fs=1000000.0):
'''
Grabs the trough-to-peak width of the spike
Parameters
------
block_path : str
the path to the block
cluster : int
the cluster identifier
new_fs : float
sampling rate to upsample to (default=1000000.0 Hz, i.e. 1 MHz)
Returns
------
width : float
the width of the spike in seconds
'''
fs = load_fs(block_path)
exemplar = get_spike_exemplar(block_path, cluster)
trough, peak = get_troughpeak(*upsample_spike(exemplar, fs, new_fs=new_fs))
return peak - trough
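# Illustrative pipeline (hypothetical block path / cluster id): get_width simply
# chains the helpers above -- exemplar -> upsample -> trough/peak times.
# fs = load_fs('/data/B999/block_1')
# exemplar = get_spike_exemplar('/data/B999/block_1', 42)
# t_up, shape_up = upsample_spike(exemplar, fs)
# trough_t, peak_t = get_troughpeak(t_up, shape_up)
# width_s = peak_t - trough_t   # equals get_width('/data/B999/block_1', 42)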
def get_mean_waveform_array(block_path, cluster):
'''
returns the mean spike shape on all channels
Parameters
------
block_path : str
the path to the block
cluster : int
the cluster identifier
Returns
------
mean_waveform_array : numpy array
mean waveform on all channels. shape: (time_samples, channels)
'''
prb_info = load_probe(block_path)
mean_waveform = find_mean_waveforms(block_path, cluster)
shape = (-1, len(prb_info.channel_groups[0]['channels']))
return np.fromfile(mean_waveform, dtype=np.float32).reshape(shape)
def get_spike_exemplar(block_path, cluster):
'''
Returns an exemplar of the spike shape on the principal channel
Parameters
------
block_path : str
the path to the block
cluster : int
the cluster identifier
Returns
------
exemplar : numpy array
mean waveform on principal channel
'''
mean_waveform = find_mean_waveforms(block_path, cluster)
arr = get_mean_waveform_array(block_path, cluster)
mean_masks = find_mean_masks(block_path, cluster)
mean_masks_arr = np.fromfile(mean_masks, dtype=np.float32)
return arr[:, mean_masks_arr.argmax()]
def get_wide_narrow(block_path, cluster_list, thresh):
'''
Return lists of clusters
Parameters
------
block_path : str
the path to the block
cluster_list : array
a list of cluster identifiers
thresh : float
minimum duration of spike to be considered wide
Returns
------
wide : list of clusters with width greater than the threshold
narrow : list of clusters with width less than the threshold
'''
wide = []
narrow = []
for cluster in cluster_list:
sw = get_width(block_path, cluster)
if sw >= thresh:
wide.append(cluster)
else:
narrow.append(cluster)
return (wide, narrow)
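# Illustrative usage (hypothetical values): 0.0004 s (0.4 ms) is a commonly used
# boundary between putative broad- and narrow-spiking units, but the threshold
# is a free parameter here.
# wide, narrow = get_wide_narrow('/data/B999/block_1', [5, 12, 42], thresh=0.0004)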
def make_phy_folder(block_path):
'''
Create a directory for phy clusters
Parameters
------
block_path : str
the path to the block
Returns
------
phy folder: string
path to phy directory
'''
kwikf = find_kwik(block_path)
kwikfname = os.path.split(kwikf)[1]
kwikname = os.path.splitext(kwikfname)[0]
phy_fold = os.path.join(block_path, kwikname + '.phy')
phy_fold = os.path.abspath(os.path.join(phy_fold, 'cluster_store/0/main/'))
if not os.path.exists(phy_fold):
os.makedirs(phy_fold)
return phy_fold
def spikeindices(block_path, cluster, channel_group=0, clustering='main'):
'''
Return a list of indices of spikes for a cluster
Parameters
------
block_path : str
the path to the block
cluster : int
the cluster identifier
channel_group :
the channel group identifier
clustering : str, optional
ID of clustering
Returns
------
indices : numpy array
indices of spikes in a specified cluster
'''
with h5.File(find_kwik(block_path), 'r') as kwikf:
sptimes = kwikf[
'/channel_groups/{}/spikes/clusters/{}'.format(channel_group, clustering)][:]
return (sptimes == cluster)
def compute_cluster_waveforms(block_path):
'''
legacy method for computing cluster waveforms
Parameters
------
block_path : str
the path to the block
'''
with open(find_info(block_path), 'rb') as infofile:
info = json.load(infofile)
prespike = info['params']['prespike']
postspike = info['params']['postspike']
nchans = info['params']['nchan']
spikes = load_spikes(block_path)
clusters = spikes['cluster'].unique()
phy_fold = make_phy_folder(block_path)
for cluster in clusters:
print("Cluster: {}".format(cluster))
cluspikes = spikes[spikes['cluster'] == cluster]
cluspiketimes = cluspikes['time_samples'].values
mean_waveform = np.zeros((prespike + postspike, nchans))
waveforms = np.zeros((len(cluspiketimes), prespike + postspike, nchans))
with h5.File(find_kwd(block_path), 'r') as kwdf:
for ind, sptime in enumerate(cluspiketimes):
test = np.zeros((prespike + postspike, nchans))
start_ind = max((int(sptime - prespike)), 0)
start_ind2 = abs(min(int(sptime - prespike), 0))
test[start_ind2:] = kwdf['/recordings/0/data'][start_ind:int(sptime + postspike), :]
waveforms[ind, :, :] = test
mean_waveform += test
waveforms = waveforms.flatten()
mean_waveform /= len(cluspiketimes)
mean_waveform = mean_waveform.flatten()
with h5.File(find_kwx(block_path), 'r') as kwxf:
cluster_spike_inds = spikeindices(block_path, cluster)
nspike = np.count_nonzero(cluster_spike_inds)
import logging
import numpy as np
from keras.layers.advanced_activations import ReLU
from keras.regularizers import l2
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.initializers import RandomNormal
from keras.layers import Add
from keras.layers import Concatenate
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import DepthwiseConv2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Input
from keras.layers import ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.advanced_activations import PReLU
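# Overview of invResidual (descriptive comment, added for clarity): the block
# unfolds a MobileNetV2-style inverted residual across the `container.frames`
# time steps held in `container.all_layers`: an optional 1x1 expansion conv
# (when xratio != 1, concatenating tsize frames for temporal blocks), a
# depthwise spatial conv, a 1x1 projection to out_channels, and an Add() skip
# connection when stride == 1 and the channel count is unchanged. History
# input/output ports are created so the temporal convolutions can be run
# frame-by-frame in a streaming fashion.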
def invResidual(config, container):
if not config.module_name:
raise ValueError('Missing module name in section')
config.layer_name = (
config.layer_name if config.layer_name else str(len(container.all_layers) - 1)
)
logging.info("frames: ", container.frames)
s = 0
if config.shift:
logging.info("3D conv block")
tsize = 3
else:
logging.info("2D conv block")
tsize = 1
config.size = int(config.size)
prev_layer = container.all_layers[-1]
prev_layer_shape = K.int_shape(prev_layer)
input_channels = prev_layer_shape[-1]
x_channels = input_channels * config.xratio
image_size = prev_layer_shape[-3], prev_layer_shape[-2]
logging.info("input image size: ", image_size)
num_convs = int(container.frames / config.tstride)
inputs_needed = (config.tstride * (num_convs - 1)) + tsize
# inputs_needed = frames + tsize - 1
if inputs_needed > 1:
logging.info("inputs_needed: ", inputs_needed)
old_frames_to_read = inputs_needed - container.frames
new_frames_to_save = min(container.frames, old_frames_to_read)
logging.info(
"num_convs: ",
num_convs,
"inputs_needed: ",
inputs_needed,
"history frames needed: ",
old_frames_to_read,
"frames to save: ",
new_frames_to_save,
"tstride: ",
config.tstride,
)
# create (optional) expansion pointwise convolution layer
input_indexes = []
for i in range(num_convs):
input_indexes.append(
len(container.all_layers) - container.frames + (i * config.tstride)
)
if config.xratio != 1:
logging.info("---------- Insert channel multiplier pointwise conv -------------")
# attach output ports to inputs we will need next pass if tsize>1
for f in range(new_frames_to_save):
container.out_index.append(len(container.all_layers) - container.frames + f)
container.out_names.append(config.module_name + "_save_" + str(f))
# create input ports for required old frames if tsize>1
for f in range(old_frames_to_read):
h_name = config.module_name + "_history_" + str(f)
container.all_layers.append(
Input(shape=(image_size[0], image_size[1], input_channels), name=h_name)
)
container.in_names.append(h_name)
container.in_index.append(len(container.all_layers) - 1)
# get weights
n = config.module_name + ".conv." + str(s) + ".0."
if n + "weight" in container.weights:
weights_pt = container.weights[n + "weight"]
logging.info(
"checkpoint: ",
weights_pt.shape,
)
weights_k = np.transpose(weights_pt, [2, 3, 1, 0])
bias = container.weights[n + "bias"]
else:
logging.info("missing weight ", n + "weight")
weights_k = np.random.rand(1, 1, tsize * input_channels, x_channels)
bias = np.zeros(x_channels)
container.fake_weights = True
expected_weights_shape = (1, 1, tsize * input_channels, x_channels)
logging.info(
"weight shape, expected : ",
expected_weights_shape,
"transposed: ",
weights_k.shape,
)
if weights_k.shape != expected_weights_shape:
logging.info("weight matrix shape is wrong, making a fake one")
weights_k = np.random.rand(1, 1, tsize * input_channels, x_channels)
bias = np.zeros(x_channels)
container.fake_weights = True
weights = [weights_k, bias]
inputs = []
outputs = []
for f in range(inputs_needed):
inputs.append(
container.all_layers[len(container.all_layers) - inputs_needed + f]
)
if config.merge_in > 0:
inputs.append(
container.all_layers[
len(container.all_layers) - (2 * inputs_needed) + f
]
)
for f in range(int(container.frames / config.tstride)):
layers = []
if tsize > 1:
for t in range(tsize):
# offset is constant with f, except if tstride,
# then steps by extra step every time through
layers.append(inputs[(tsize - t - 1) + (f * (config.tstride))])
cat_layer = Concatenate()(layers)
else:
cat_layer = inputs[f * (config.tstride)]
outputs.append(
(
Conv2D(
x_channels,
(1, 1),
use_bias=not config.batch_normalize,
weights=weights,
activation=None,
padding="same",
)
)(cat_layer)
)
logging.info(
"parallel convs: ",
int(container.frames / config.tstride),
" : ",
K.int_shape(cat_layer),
)
if config.activation == "leaky":
for f in range(int(container.frames / config.tstride)):
if not container.conversion_parameters["use_prelu"]:
outputs[f] = LeakyReLU(alpha=0.1)(outputs[f])
else:
outputs[f] = PReLU(
alpha_initializer=RandomNormal(mean=0.1, stddev=0.0, seed=None),
shared_axes=[1, 2],
)(outputs[f])
elif config.activation == "relu6":
for f in range(int(container.frames / config.tstride)):
outputs[f] = ReLU(max_value=6)(outputs[f])
for f in range(int(container.frames / config.tstride)):
container.all_layers.append(outputs[f])
s += 1
container.frames = int(container.frames / config.tstride)
else:
logging.info("Skipping channel multiplier pointwise conv, no expansion")
# create groupwise convolution
# get weights
logging.info("---------- Depthwise conv -------------")
n = config.module_name + ".conv." + str(s) + ".0."
logging.info("module name base: ", n)
if n + "weight" in container.weights:
weights_pt = container.weights[n + "weight"]
logging.info(
"checkpoint: ",
weights_pt.shape,
)
weights_k = np.transpose(weights_pt, [2, 3, 0, 1])
bias = container.weights[n + "bias"]
else:
logging.info("missing weight ", n + "weight")
weights_k = np.random.rand(config.size, config.size, x_channels, 1)
bias = np.zeros(x_channels)
container.fake_weights = True
expected_weights_shape = (config.size, config.size, x_channels, 1)
logging.info(
"weight shape, expected : ",
expected_weights_shape,
"transposed: ",
weights_k.shape,
)
if weights_k.shape != expected_weights_shape:
logging.info("weight matrix shape is wrong, making a fake one")
container.fake_weights = True
weights_k = np.random.rand(config.size, config.size, x_channels, 1)
bias = np.zeros(x_channels)
weights = [weights_k, bias]
inputs = []
outputs = []
padding = "same" if config.pad == 1 and config.stride == 1 else "valid"
for f in range(container.frames):
inputs.append(
container.all_layers[len(container.all_layers) - container.frames + f]
)
if config.stride > 1:
for f in range(len(inputs)):
if config.size == 3: # originally for all sizes
inputs[f] = ZeroPadding2D(
((config.size - config.stride, 0), (config.size - config.stride, 0))
)(inputs[f])
elif config.size == 5: # I found this works...
inputs[f] = ZeroPadding2D(((2, 2), (2, 2)))(inputs[f])
else:
logging.info("I have no idea what to do for size ", config.size)
exit()
logging.info("parallel convs: ", f, " : ", K.int_shape(inputs[0]), "padding: ", padding)
for f in range(container.frames):
outputs.append(
(
DepthwiseConv2D(
(config.size, config.size),
strides=(config.stride, config.stride),
use_bias=not config.batch_normalize,
weights=weights,
activation=None,
padding=padding,
)
)(inputs[f])
)
if config.activation == "leaky":
for f in range(int(container.frames)):
if not container.conversion_parameters["use_prelu"]:
outputs[f] = LeakyReLU(alpha=0.1)(outputs[f])
else:
outputs[f] = PReLU(
alpha_initializer=RandomNormal(mean=0.1, stddev=0.0, seed=None),
shared_axes=[1, 2],
)(outputs[f])
elif config.activation == "relu6":
for f in range(int(container.frames)):
outputs[f] = ReLU(max_value=6)(outputs[f])
for f in range(int(container.frames)):
container.all_layers.append(outputs[f])
s += 1
# create pointwise convolution
# get weights
logging.info("---------- Pointwise conv -------------")
n = config.module_name + ".conv." + str(s) + "."
logging.info("module name base: ", n)
if n + "weight" in container.weights:
weights_pt = container.weights[n + "weight"]
logging.info(
"checkpoint: ",
weights_pt.shape,
)
weights_k = np.transpose(weights_pt, [2, 3, 1, 0])
bias = container.weights[n + "bias"]
else:
logging.info("missing weight ", n + "weight")
container.fake_weights = True
weights_k = np.random.rand(1, 1, x_channels, config.out_channels)
bias = np.zeros(config.out_channels)
expected_weights_shape = (1, 1, x_channels, config.out_channels)
logging.info(
"weight shape, expected : ",
expected_weights_shape,
"transposed: ",
weights_k.shape,
)
if weights_k.shape != expected_weights_shape:
logging.info("weight matrix shape is wrong, making a fake one")
container.fake_weights = True
weights_k = np.random.rand(1, 1, x_channels, config.out_channels)
bias = np.zeros(config.out_channels)
weights = [weights_k, bias]
logging.info("combined shape: ", weights[0].shape, weights[1].shape)
inputs = []
outputs = []
for f in range(container.frames):
inputs.append(
container.all_layers[len(container.all_layers) - container.frames + f]
)
logging.info(
"parallel convs: ",
f,
" : ",
K.int_shape(container.all_layers[len(container.all_layers) - container.frames]),
)
for f in range(container.frames):
conv_input = container.all_layers[
len(container.all_layers) - container.frames + f
]
outputs.append(
(
Conv2D(
config.out_channels,
(1, 1),
use_bias=not config.batch_normalize,
weights=weights,
activation=None,
padding="same",
)
)(conv_input)
)
if config.stride == 1 and input_channels == config.out_channels:
for f in range(int(container.frames)):
container.all_layers.append(
Add()([container.all_layers[input_indexes[f]], outputs[f]])
)
else:
for f in range(int(container.frames)):
container.all_layers.append(outputs[f])
s += 1
def convolutional(config, container):
if not config.module_name:
raise ValueError('Missing module name in section')
config.layer_name = (
config.layer_name if config.layer_name else str(len(container.all_layers) - 1)
)
config.size = int(config.size)
if container.frames > 1:
logging.info("frames: ", container.frames)
prev_layer_shape = K.int_shape(container.all_layers[-1])
input_channels = prev_layer_shape[-1]
image_size = prev_layer_shape[-3], prev_layer_shape[-2]
num_convs = int(container.frames / config.tstride)
if num_convs > 1:
logging.info("num_convs: ", num_convs)
inputs_needed = (config.tstride * (num_convs - 1)) + config.tsize
# inputs_needed = frames + tsize - 1
if inputs_needed > 1:
logging.info("inputs_needed: ", inputs_needed)
old_frames_to_read = inputs_needed - container.frames
if old_frames_to_read < 0:
logging.info("negative number of old frames!!!!!!!!!")
if old_frames_to_read:
logging.info("history frames needed: ", old_frames_to_read)
new_frames_to_save = min(container.frames, old_frames_to_read)
if new_frames_to_save:
logging.info("new frames to save: ", new_frames_to_save)
# attach output ports to inputs we will need next pass
if config.no_output is False:
for f in range(new_frames_to_save):
container.out_index.append(len(container.all_layers) - container.frames + f)
container.out_names.append(config.module_name + "_save_" + str(f))
# attach output ports to unsaved inputs if we need to share inputs to a slave network
if config.share is True:
for f in range(new_frames_to_save, container.frames):
container.out_index.append(len(container.all_layers) - container.frames + f)
container.out_names.append(config.module_name + "_share_" + str(f))
# create input ports for required old frames
for f in range(old_frames_to_read):
xx = config.module_name + "_history_" + str(f)
container.in_names.append(xx)
if config.image_input:
container.image_inputs.append(xx)
container.all_layers.append(
Input(shape=(image_size[0], image_size[1], input_channels), name=xx)
)
container.in_index.append(len(container.all_layers) - 1)
# create input ports for merged-in frames
if config.merge_in > 0:
input_channels = input_channels + config.merge_in
for f in range(inputs_needed):
xx = config.module_name + "_merge_in_" + str(f)
container.in_names.append(xx)
container.all_layers.append(
Input(shape=(image_size[0], image_size[1], config.merge_in), name=xx)
)
logging.info("merge_in input at: ", len(container.all_layers) - 1)
container.in_index.append(len(container.all_layers) - 1)
padding = "same" if config.pad == 1 and config.stride == 1 else "valid"
# extract parameter for this module from Pytorch checkpoint file
conv_weights_pt = np.random.rand(
input_channels, config.filters, config.tsize, config.size, config.size
)
conv_bias = [0]
if config.module_name + ".weight" in container.weights:
conv_weights_pt = container.weights[config.module_name + ".weight"]
logging.info(
"weight: ",
config.module_name + ".weight",
container.weights[config.module_name + ".weight"].shape,
)
# convert to tsize list of 2d conv weight matrices, transposed for Keras
w_list = []
if len(conv_weights_pt.shape) == 5: # check if this is a 3D conv being unfolded
for t in range(config.tsize):
w_list.append(
np.transpose(
conv_weights_pt[:, :, config.tsize - 1 - t, :, :], [2, 3, 1, 0]
)
)
else: # this is simply a single 2D conv
w_list.append(np.transpose(conv_weights_pt[:, :, :, :], [2, 3, 1, 0]))
# concatenate along the in_dim axis the tsize matrices
conv_weights = np.concatenate(w_list, axis=2)
"""
creation.py
--------------
Create meshes from primitives, or with operations.
"""
from .base import Trimesh
from .constants import log, tol
from .geometry import faces_to_edges, align_vectors, plane_transform
from . import util
from . import grouping
from . import triangles
from . import transformations as tf
import numpy as np
import collections
try:
# shapely is a soft dependency
from shapely.geometry import Polygon
from shapely.wkb import loads as load_wkb
except BaseException as E:
# shapely will sometimes raise OSErrors
# on import rather than just ImportError
from . import exceptions
# re-raise the exception when someone tries
# to use the module that they don't have
Polygon = exceptions.closure(E)
load_wkb = exceptions.closure(E)
def revolve(linestring,
angle=None,
sections=None,
transform=None,
**kwargs):
"""
Revolve a 2D line string around the 2D Y axis, with a result with
the 2D Y axis pointing along the 3D Z axis.
This function is intended to handle the complexity of indexing
and is intended to be used to create all radially symmetric primitives,
eventually including cylinders, annular cylinders, capsules, cones,
and UV spheres.
Note that if your linestring is closed, it needs to be counterclockwise
if you would like face winding and normals facing outwards.
Parameters
-------------
linestring : (n, 2) float
Lines in 2D which will be revolved
angle : None or float
Angle in radians to revolve curve by
sections : None or int
Number of sections result should have
If not specified default is 32 per revolution
transform : None or (4, 4) float
Transform to apply to mesh after construction
**kwargs : dict
Passed to Trimesh constructor
Returns
--------------
revolved : Trimesh
Mesh representing revolved result
"""
linestring = np.asanyarray(linestring, dtype=np.float64)
# linestring must be ordered 2D points
if len(linestring.shape) != 2 or linestring.shape[1] != 2:
raise ValueError('linestring must be 2D!')
if angle is None:
# default to closing the revolution
angle = np.pi * 2
closed = True
else:
# check passed angle value
closed = angle >= ((np.pi * 2) - 1e-8)
if sections is None:
# default to 32 sections for a full revolution
sections = int(angle / (np.pi * 2) * 32)
# change to face count
sections += 1
# create equally spaced angles
theta = np.linspace(0, angle, sections)
# 2D points around the revolution
points = np.column_stack((np.cos(theta), np.sin(theta)))
# how many points per slice
per = len(linestring)
# use the 2D X component as radius
radius = linestring[:, 0]
# use the 2D Y component as the height along revolution
height = linestring[:, 1]
# a lot of tiling to get our 3D vertices
vertices = np.column_stack((
np.tile(points, (1, per)).reshape((-1, 2)) *
np.tile(radius, len(points)).reshape((-1, 1)),
np.tile(height, len(points))))
if closed:
# should be a duplicate set of vertices
assert np.allclose(vertices[:per],
vertices[-per:])
# chop off duplicate vertices
vertices = vertices[:-per]
if transform is not None:
# apply transform to vertices
vertices = tf.transform_points(vertices, transform)
# how many slices of the pie
slices = len(theta) - 1
# start with a quad for every segment
# this is a superset which will then be reduced
quad = np.array([0, per, 1,
1, per, per + 1])
# stack the faces for a single slice of the revolution
single = np.tile(quad, per).reshape((-1, 3))
# `per` is basically the stride of the vertices
single += np.tile(np.arange(per), (2, 1)).T.reshape((-1, 1))
# remove any zero-area triangle
# this covers many cases without having to think too much
single = single[triangles.area(vertices[single]) > tol.merge]
# how much to offset each slice
# note arange multiplied by vertex stride
# but tiled by the number of faces we actually have
offset = np.tile(np.arange(slices) * per,
(len(single), 1)).T.reshape((-1, 1))
# stack a single slice into N slices
stacked = np.tile(single.ravel(), slices).reshape((-1, 3))
if tol.strict:
# make sure we didn't screw up stacking operation
assert np.allclose(stacked.reshape((-1, single.shape[0], 3)) - single, 0)
# offset stacked and wrap vertices
faces = (stacked + offset) % len(vertices)
# create the mesh from our vertices and faces
mesh = Trimesh(vertices=vertices, faces=faces,
**kwargs)
# strict checks run only in unit tests
if (tol.strict and
np.allclose(radius[[0, -1]], 0.0) or
np.allclose(linestring[0], linestring[-1])):
# if revolved curve starts and ends with zero radius
# it should really be a valid volume, unless the sign
# reversed on the input linestring
assert mesh.is_volume
return mesh
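# Illustrative usage (example values, not from the original module): revolving a
# right-triangle profile that starts and ends at zero radius yields a closed
# cone; passing an angle gives a partial revolution.
# cone = revolve([[0.0, 1.0], [1.0, 0.0], [0.0, 0.0]])
# half = revolve([[0.0, 1.0], [1.0, 0.0], [0.0, 0.0]], angle=np.pi)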
def extrude_polygon(polygon,
height,
transform=None,
triangle_args=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh
Parameters
----------
polygon : shapely.geometry.Polygon
2D geometry to extrude
height : float
Distance to extrude polygon along Z
transform : None or (4, 4) float
Transform to apply to the resulting mesh
triangle_args : str or None
Passed to triangle
**kwargs:
passed to Trimesh
Returns
----------
mesh : trimesh.Trimesh
Resulting extrusion as watertight body
"""
# create a triangulation from the polygon
vertices, faces = triangulate_polygon(
polygon, triangle_args=triangle_args, **kwargs)
# extrude that triangulation along Z
mesh = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
transform=transform,
**kwargs)
return mesh
def sweep_polygon(polygon,
path,
angles=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh along an
arbitrary 3D path. Doesn't handle sharp curvature well.
Parameters
----------
polygon : shapely.geometry.Polygon
Profile to sweep along path
path : (n, 3) float
A path in 3D
angles : (n,) float
Optional rotation angle relative to prior vertex
at each vertex
Returns
-------
mesh : trimesh.Trimesh
Geometry of result
"""
path = np.asanyarray(path, dtype=np.float64)
if not util.is_shape(path, (-1, 3)):
raise ValueError('Path must be (n, 3)!')
# Extract 2D vertices and triangulation
verts_2d = np.array(polygon.exterior)[:-1]
base_verts_2d, faces_2d = triangulate_polygon(polygon, **kwargs)
n = len(verts_2d)
# Create basis for first planar polygon cap
x, y, z = util.generate_basis(path[0] - path[1])
tf_mat = np.ones((4, 4))
tf_mat[:3, :3] = np.c_[x, y, z]
tf_mat[:3, 3] = path[0]
# Compute 3D locations of those vertices
verts_3d = np.c_[verts_2d, np.zeros(n)]
verts_3d = tf.transform_points(verts_3d, tf_mat)
base_verts_3d = np.c_[base_verts_2d,
np.zeros(len(base_verts_2d))]
base_verts_3d = tf.transform_points(base_verts_3d,
tf_mat)
# keep matching sequence of vertices and 0- indexed faces
vertices = [base_verts_3d]
faces = [faces_2d]
# Compute plane normals for each turn --
# each turn induces a plane halfway between the two vectors
v1s = util.unitize(path[1:-1] - path[:-2])
v2s = util.unitize(path[1:-1] - path[2:])
norms = np.cross(np.cross(v1s, v2s), v1s + v2s)
import time
import cv2
import numpy as np
from numba import njit
from scipy.ndimage import correlate
from sklearn.linear_model import Ridge
def compute_image_grads(image):
kernel_hor = np.array([-1, 0, 1], dtype=np.float32).reshape(1, 3)
kernel_ver = kernel_hor.T
grad_hor = correlate(image.astype(np.float32), kernel_hor)
grad_ver = correlate(image.astype(np.float32), kernel_ver)
grads = np.maximum(grad_hor, grad_ver)
return grads
def compute_gradient_sensivity(image):
height, width = image.shape
lapl_diff = np.array([
[ 1, -2, 1],
[-2, 4, -2],
[ 1, -2, 1]
], dtype=np.float32)
convolved = correlate(image.astype(np.float32), lapl_diff)
factor = np.sqrt(np.pi / 2)
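# Note (assumption, added for clarity): the 3x3 Laplacian-difference kernel and
# the sqrt(pi/2) factor above match Immerkaer's fast noise-variance estimator; a
# typical completion would be
# sigma = factor * np.sum(np.abs(convolved)) / (6.0 * (width - 2) * (height - 2))
# followed by returning sigma. Treat this as a sketch, not necessarily the
# original implementation.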
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.spatial import distance
from scipy.optimize import differential_evolution
class IntracellAnalysisV2:
# IA constants
FC_UPPER_VOLTAGE = 4.20
FC_LOWER_VOLTAGE = 2.70
NE_UPPER_VOLTAGE = 0.01
NE_LOWER_VOLTAGE = 1.50
PE_UPPER_VOLTAGE = 4.30
PE_LOWER_VOLTAGE = 2.86
THRESHOLD = 4.84 * 0.0
def __init__(self,
pe_pristine_file,
ne_pristine_file,
cycle_type='rpt_0.2C',
step_type=0,
error_type='V-Q',
ne_2pos_file=None,
ne_2neg_file=None
):
"""
Invokes the cell electrode analysis class. This is a class designed to fit the cell and electrode
parameters in order to determine changes of electrodes within the full cell from only full cell cycling data.
Args:
pe_pristine_file (str): file name for the half cell data of the pristine (uncycled) positive
electrode
ne_pristine_file (str): file name for the half cell data of the pristine (uncycled) negative
electrode
cycle_type (str): type of diagnostic cycle for the fitting
step_type (int): charge or discharge (0 for charge, 1 for discharge)
error_type (str): defines which error metric is to be used
ne_2neg_file (str): file name of the data for the negative component of the anode
ne_2pos_file (str): file name of the data for the positive component of the anode
"""
self.pe_pristine = pd.read_csv(pe_pristine_file, usecols=['SOC_aligned', 'Voltage_aligned'])
self.ne_1_pristine = pd.read_csv(ne_pristine_file, usecols=['SOC_aligned', 'Voltage_aligned'])
if ne_2neg_file and ne_2pos_file:
self.ne_2_pristine_pos = pd.read_csv(ne_2pos_file)
self.ne_2_pristine_neg = pd.read_csv(ne_2neg_file)
else:
self.ne_2_pristine_pos = pd.DataFrame()
self.ne_2_pristine_neg = pd.DataFrame()
if step_type == 0:
self.capacity_col = 'charge_capacity'
else:
self.capacity_col = 'discharge_capacity'
self.cycle_type = cycle_type
self.step_type = step_type
self.error_type = error_type
def process_beep_cycle_data_for_candidate_halfcell_analysis_ah(self,
cell_struct,
cycle_index):
"""
Ingests BEEP structured cycling data and cycle_index and returns
a Dataframe of evenly spaced capacity with corresponding voltage.
Inputs:
cell_struct (MaccorDatapath): BEEP structured cycling data
cycle_index (int): cycle number at which to evaluate
Outputs:
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned (evenly spaced)
and Voltage_aligned
"""
# filter the data down to the diagnostic type of interest
diag_type_cycles = cell_struct.diagnostic_data.loc[cell_struct.diagnostic_data['cycle_type'] == self.cycle_type]
real_cell_candidate_charge_profile = diag_type_cycles.loc[
(diag_type_cycles.cycle_index == cycle_index)
& (diag_type_cycles.step_type == 0) # step_type = 0 is charge, 1 is discharge
& (diag_type_cycles.voltage < self.FC_UPPER_VOLTAGE)
& (diag_type_cycles[self.capacity_col] > 0)][['voltage', 'charge_capacity']]
# renaming capacity,voltage column
real_cell_candidate_charge_profile['Q'] = real_cell_candidate_charge_profile['charge_capacity']
real_cell_candidate_charge_profile['Voltage'] = real_cell_candidate_charge_profile['voltage']
real_cell_candidate_charge_profile.drop('voltage', axis=1, inplace=True)
# interpolate voltage along evenly spaced capacity axis
q_vec = np.linspace(0, np.max(real_cell_candidate_charge_profile['Q']), 1001)
real_cell_candidate_charge_profile_aligned = pd.DataFrame()
real_cell_candidate_charge_profile_interper = interp1d(real_cell_candidate_charge_profile['Q'],
real_cell_candidate_charge_profile['Voltage'],
bounds_error=False,
fill_value=(
self.FC_LOWER_VOLTAGE, self.FC_UPPER_VOLTAGE))
real_cell_candidate_charge_profile_aligned['Voltage_aligned'] = real_cell_candidate_charge_profile_interper(
q_vec)
real_cell_candidate_charge_profile_aligned['Q_aligned'] = q_vec
return real_cell_candidate_charge_profile_aligned
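# Illustrative usage (hypothetical file names and cycle index, not from the
# original module):
# ia = IntracellAnalysisV2('pe_pristine.csv', 'ne_pristine.csv',
# cycle_type='rpt_0.2C', step_type=0, error_type='V-Q')
# qv = ia.process_beep_cycle_data_for_candidate_halfcell_analysis_ah(cell_struct, 3)
# # qv has columns 'Q_aligned' (evenly spaced) and 'Voltage_aligned'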
def _impose_electrode_scale(self,
pe_pristine=pd.DataFrame(),
ne_1_pristine=pd.DataFrame(),
ne_2_pristine_pos=pd.DataFrame(),
ne_2_pristine_neg=pd.DataFrame(),
lli=0.0, q_pe=0.0, q_ne=0.0, x_ne_2=0.0):
"""
Scales the reference electrodes according to specified capacities and
offsets their capacities according to lli. Blends negative electrode materials.
Inputs:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
lli (float): Loss of Lithium Inventory - the capacity offset (misalignment) between
the cathode and anode zero-capacity points
q_pe (float): capacity of the positive electrode (cathode)
q_ne (float): capacity of the negative electrode (anode)
x_ne_2 (float): fraction of ne_2_pristine_pos or ne_2_pristine_neg
(positive or negative value, respectively) to ne_1_pristine
Outputs:
pe_degraded (Dataframe): positive electrode with imposed capacity
scale to emulate degradation
ne_degraded (Dataframe): negative electrode with imposed capacity
scale and capacity offset to emulate degradation
"""
# Blend negative electrodes
ne_pristine = blend_electrodes(ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg, x_ne_2)
# rescaling pristine electrodes to q_pe and q_ne
pe_q_scaled = pe_pristine.copy()
pe_q_scaled['Q_aligned'] = (pe_q_scaled['SOC_aligned'] / 100) * q_pe
ne_q_scaled = ne_pristine.copy()
ne_q_scaled['Q_aligned'] = (ne_q_scaled['SOC_aligned'] / 100) * q_ne
# translate pristine ne electrode with lli
ne_q_scaled['Q_aligned'] = ne_q_scaled['Q_aligned'] + lli
# Re-interpolate to align dataframes for differencing
lower_q = np.min((np.min(pe_q_scaled['Q_aligned']),
np.min(ne_q_scaled['Q_aligned'])))
upper_q = np.max((np.max(pe_q_scaled['Q_aligned']),
np.max(ne_q_scaled['Q_aligned'])))
q_vec = np.linspace(lower_q, upper_q, 1001)
# Actually aligning the electrode Q's
pe_pristine_interper = interp1d(pe_q_scaled['Q_aligned'],
pe_q_scaled['Voltage_aligned'], bounds_error=False)
pe_degraded = pe_q_scaled.copy()
pe_degraded['Q_aligned'] = q_vec
pe_degraded['Voltage_aligned'] = pe_pristine_interper(q_vec)
ne_pristine_interper = interp1d(ne_q_scaled['Q_aligned'],
ne_q_scaled['Voltage_aligned'], bounds_error=False)
ne_degraded = ne_q_scaled.copy()
ne_degraded['Q_aligned'] = q_vec
ne_degraded['Voltage_aligned'] = ne_pristine_interper(q_vec)
# Returning pe and ne degraded on an Ah basis
return pe_degraded, ne_degraded
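# Illustrative call (hypothetical parameter values, Ah units): emulate a cell
# with a 4.8 Ah cathode and a 5.0 Ah anode shifted by 0.15 Ah of lithium
# inventory loss, with no second anode component blended in.
# pe_deg, ne_deg = ia._impose_electrode_scale(ia.pe_pristine, ia.ne_1_pristine,
# ia.ne_2_pristine_pos, ia.ne_2_pristine_neg,
# lli=0.15, q_pe=4.8, q_ne=5.0, x_ne_2=0.0)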
def halfcell_degradation_matching_ah(self, x, *params):
"""
Calls underlying functions to impose degradation through electrode
capacity scale and alignment through LLI. Modifies emulated full cell
data to be within full cell voltage range and calibrates (zeros) capacity
at the lowest permissible voltage. Interpolates real and emulated data onto
a common capacity axis.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
df_real_aligned (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_aligned (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
lli = x[0]
q_pe = x[1]
q_ne = x[2]
x_ne_2 = x[3]
(pe_pristine,
ne_1_pristine,
ne_2_pristine_pos,
ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned) = params
# output degraded ne and pe (on an Ah basis, with electrode alignment
# (NaNs for voltage, when no capacity actually at the corresponding capacity index))
pe_out, ne_out = self._impose_electrode_scale(pe_pristine, ne_1_pristine,
ne_2_pristine_pos, ne_2_pristine_neg,
lli, q_pe,
q_ne, x_ne_2)
# PE - NE = full cell voltage
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['Q_aligned'] = pe_out['Q_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
# Replace emulated full cell values outside of voltage range with NaN
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] < self.FC_LOWER_VOLTAGE] = np.nan
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] > self.FC_UPPER_VOLTAGE] = np.nan
# Center the emulated full cell and half cell curves onto the same Q at which the real (degraded)
# capacity measurement started (self.FC_LOWER_VOLTAGE)
emulated_full_cell_with_degradation_zeroed = pd.DataFrame()
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'] = emulated_full_cell_with_degradation[
'Voltage_aligned'].copy()
zeroing_value = emulated_full_cell_with_degradation['Q_aligned'].loc[
np.nanargmin(emulated_full_cell_with_degradation['Voltage_aligned'])
]
emulated_full_cell_with_degradation_zeroed['Q_aligned'] = \
(emulated_full_cell_with_degradation['Q_aligned'].copy() - zeroing_value)
pe_out_zeroed = pe_out.copy()
pe_out_zeroed['Q_aligned'] = pe_out['Q_aligned'] - zeroing_value
ne_out_zeroed = ne_out.copy()
ne_out_zeroed['Q_aligned'] = ne_out['Q_aligned'] - zeroing_value
# Interpolate full cell profiles across same Q range
min_q = np.min(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()])
max_q = np.max(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()])
emulated_interper = interp1d(emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
bounds_error=False)
real_interper = interp1d(
real_cell_candidate_charge_profile_aligned['Q_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()],
real_cell_candidate_charge_profile_aligned['Voltage_aligned'].loc[
~real_cell_candidate_charge_profile_aligned['Voltage_aligned'].isna()],
bounds_error=False)
q_vec = np.linspace(min_q, max_q, 1001)
emulated_aligned = pd.DataFrame()
emulated_aligned['Q_aligned'] = q_vec
emulated_aligned['Voltage_aligned'] = emulated_interper(q_vec)
real_aligned = pd.DataFrame()
real_aligned['Q_aligned'] = q_vec
real_aligned['Voltage_aligned'] = real_interper(q_vec)
return pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned
def get_dqdv_over_v_from_degradation_matching_ah(self, x, *params):
"""
This function imposes degradation scaling ,then outputs the dqdv representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
dq_dv_over_v_real (Dataframe): dqdv across voltage for the real cell data
dq_dv_over_v_emulated (Dataframe): dqdv across voltage for the emulated cell data
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_ah(x, *params)
# Calculate dqdv from full cell profiles
dq_dv_real = pd.DataFrame(np.gradient(df_real_interped['Q_aligned'], df_real_interped['Voltage_aligned']),
columns=['dQdV']).ewm(0.1).mean()
dq_dv_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Q_aligned'], emulated_full_cell_interped['Voltage_aligned']),
columns=['dQdV']).ewm(0.1).mean()
# Include original data
dq_dv_real['Q_aligned'] = df_real_interped['Q_aligned']
dq_dv_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dq_dv_emulated['Q_aligned'] = emulated_full_cell_interped['Q_aligned']
dq_dv_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate dQdV and Q over V, aligns real and emulated over V
voltage_vec = np.linspace(self.FC_LOWER_VOLTAGE, self.FC_UPPER_VOLTAGE, 1001)
v_dq_dv_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['dQdV'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_q_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['Q_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, np.max(df_real_interped['Q_aligned'])))
v_dq_dv_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['dQdV'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_q_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['Q_aligned'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, np.max(df_real_interped['Q_aligned'])))
dq_dv_over_v_real = pd.DataFrame(v_dq_dv_interper_real(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_real['Q_aligned'] = v_q_interper_real(voltage_vec)
dq_dv_over_v_real['Voltage_aligned'] = voltage_vec
dq_dv_over_v_emulated = pd.DataFrame(v_dq_dv_interper_emulated(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_emulated['Q_aligned'] = v_q_interper_emulated(voltage_vec)
dq_dv_over_v_emulated['Voltage_aligned'] = voltage_vec
return (pe_out_zeroed,
ne_out_zeroed,
dq_dv_over_v_real,
dq_dv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_dvdq_over_q_from_degradation_matching_ah(self, x, *params):
"""
This function imposes degradation scaling ,then outputs the dVdQ representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
dv_dq_real (Dataframe): dVdQ across capacity for the real cell data
dv_dq_emulated (Dataframe): dVdQ across capacity for the emulated cell data
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_ah(x, *params)
# Calculate dQdV from full cell profiles
dv_dq_real = pd.DataFrame(np.gradient(df_real_interped['Voltage_aligned'], df_real_interped['Q_aligned']),
columns=['dVdQ']).ewm(0.1).mean()
dv_dq_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Voltage_aligned'], emulated_full_cell_interped['Q_aligned']),
columns=['dVdQ']).ewm(0.1).mean()
# Include original data
dv_dq_real['Q_aligned'] = df_real_interped['Q_aligned']
dv_dq_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dv_dq_emulated['Q_aligned'] = emulated_full_cell_interped['Q_aligned']
dv_dq_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Q interpolation not needed, as interpolated over Q by default
return (pe_out_zeroed,
ne_out_zeroed,
dv_dq_real,
dv_dq_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_v_over_q_from_degradation_matching_ah(self, x, *params):
"""
This function imposes degradation scaling ,then outputs the V-Q representation of the emulated cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
df_real_interped (Dataframe): capacity and voltage interpolated evenly across
capacity for the real cell data
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
(pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned) = \
self.halfcell_degradation_matching_ah(x, *params)
min_soc_full_cell = np.min(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].Q_aligned)
max_soc_full_cell = np.max(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].Q_aligned)
soc_vec_full_cell = np.linspace(min_soc_full_cell, max_soc_full_cell, 1001)
emulated_full_cell_interper = interp1d(
emulated_aligned.Q_aligned.loc[~real_aligned.Voltage_aligned.isna()],
emulated_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
real_full_cell_interper = interp1d(real_aligned.Q_aligned.loc[~real_aligned.Voltage_aligned.isna()],
real_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['Q_aligned'] = soc_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(soc_vec_full_cell)
# Interpolate the true full-cell profile
df_real_interped = emulated_full_cell_interped.copy()
df_real_interped['Q_aligned'] = soc_vec_full_cell
df_real_interped['Voltage_aligned'] = real_full_cell_interper(soc_vec_full_cell)
return pe_out_zeroed, ne_out_zeroed, df_real_interped, emulated_full_cell_interped
def get_v_over_q_from_degradation_matching_ah_no_real(self, x, *params):
"""
This function imposes degradation scaling ,then outputs the V-Q representation of the
emulated cell data, in the absence of real cell data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
emulated_full_cell_interped (Dataframe): capacity and voltage interpolated evenly
across capacity for the emulated cell data
"""
(pe_out_zeroed, ne_out_zeroed, emulated_aligned) = \
self.halfcell_degradation_matching_ah_no_real(x, *params)
min_q_full_cell = np.min(emulated_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].Q_aligned)
max_q_full_cell = np.max(emulated_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].Q_aligned)
q_vec_full_cell = np.linspace(min_q_full_cell, max_q_full_cell, 1001)
emulated_full_cell_interper = interp1d(
emulated_aligned.Q_aligned.loc[~emulated_aligned.Voltage_aligned.isna()],
emulated_aligned.Voltage_aligned.loc[~emulated_aligned.Voltage_aligned.isna()],
bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['Q_aligned'] = q_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(q_vec_full_cell)
return pe_out_zeroed, ne_out_zeroed, emulated_full_cell_interped
def halfcell_degradation_matching_ah_no_real(self, x, *params):
"""
Calls underlying functions to impose degradation through electrode
capacity scale and alignment through LLI. Modifies emulated full cell
data to be within full cell voltage range and calibrates (zeros) capacity
at the lowest permissible voltage.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
emulated_aligned (Dataframe): full cell data corresponding to the imposed degradation
"""
lli = x[0]
q_pe = x[1]
q_ne = x[2]
x_ne_2 = x[3]
pe_pristine, ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg = params
pe_out, ne_out = self._impose_electrode_scale(pe_pristine, ne_1_pristine,
ne_2_pristine_pos, ne_2_pristine_neg,
lli, q_pe,
q_ne,
x_ne_2)
# outputs degraded ne and pe (on an Ah basis, with electrode alignment (NaNs for voltage, when no overlap))
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['Q_aligned'] = pe_out['Q_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
# Replace emulated full cell values outside of voltage range with NaN
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] < self.FC_LOWER_VOLTAGE] = np.nan
emulated_full_cell_with_degradation['Voltage_aligned'].loc[
emulated_full_cell_with_degradation['Voltage_aligned'] > self.FC_UPPER_VOLTAGE] = np.nan
# Center the emulated full cell and half cell curves onto the same Q at which the real (degraded)
# capacity measurement started (self.FC_LOWER_VOLTAGE)
emulated_full_cell_with_degradation_zeroed = pd.DataFrame()
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'] = emulated_full_cell_with_degradation[
'Voltage_aligned']
zeroing_value = emulated_full_cell_with_degradation['Q_aligned'].loc[
np.nanargmin(emulated_full_cell_with_degradation['Voltage_aligned'])
]
emulated_full_cell_with_degradation_zeroed['Q_aligned'] = \
(emulated_full_cell_with_degradation['Q_aligned'] - zeroing_value)
pe_out_zeroed = pe_out.copy()
pe_out_zeroed['Q_aligned'] = pe_out['Q_aligned'] - zeroing_value
ne_out_zeroed = ne_out.copy()
ne_out_zeroed['Q_aligned'] = ne_out['Q_aligned'] - zeroing_value
# Interpolate full profiles across same Q range
min_q = np.min(
emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()])
max_q = np.max(
emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()])
emulated_interper = interp1d(emulated_full_cell_with_degradation_zeroed['Q_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].loc[
~emulated_full_cell_with_degradation_zeroed['Voltage_aligned'].isna()],
bounds_error=False)
q_vec = np.linspace(min_q, max_q, 1001)
emulated_aligned = pd.DataFrame()
emulated_aligned['Q_aligned'] = q_vec
emulated_aligned['Voltage_aligned'] = emulated_interper(q_vec)
return pe_out_zeroed, ne_out_zeroed, emulated_aligned
def _get_error_from_degradation_matching_ah(self, x, *params):
"""
Wrapper function which selects the correct error sub routine and returns its error value.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error value (float) - output of the specified error sub function
"""
error_type = self.error_type
if error_type == 'V-Q':
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
elif error_type == 'dVdQ':
return self._get_error_from_degradation_matching_dvdq(x, *params)[0]
elif error_type == 'dQdV':
return self._get_error_from_degradation_matching_dqdv(x, *params)[0]
else:
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
def _get_error_from_degradation_matching_v_q(self, x, *params):
"""
Error function returning the mean standardized Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the V-Q representation.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error (float): output of the specified error sub function
error_vector (array): vector containing Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the V-Q representation
xa (Dataframe): real full cell data used for error analysis
xb (Dataframe): emulated full cell data used for error analysis
"""
try:
(pe_out_zeroed, ne_out_zeroed, real_aligned, emulated_aligned
) = self.get_v_over_q_from_degradation_matching_ah(x, *params)
xa = real_aligned.dropna()
xb = emulated_aligned.dropna()
error_matrix = distance.cdist(xa, xb, 'seuclidean')
error_vector = error_matrix.min(axis=1)
error = error_vector.mean()
except ValueError:
error = 100
return error, None, None, None
return error, error_vector, xa, xb
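# Note (descriptive comment, added for clarity): cdist(..., 'seuclidean') scales
# each coordinate (capacity, voltage) by its variance computed over both curves
# before measuring distances, so the two axes contribute on comparable scales;
# the per-point minima are then averaged into a single scalar error.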
# Pairwise euclidean from premade dQdV
def _get_error_from_degradation_matching_dqdv(self, x, *params):
"""
Error function returning the mean standardized Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dQdV representation.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error (float): output of the specified error sub function
error_vector (array): vector containing Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dQdV representation
xa (Dataframe): real full cell data used for error analysis
xb (Dataframe): emulated full cell data used for error analysis
"""
try:
# Call dQdV generating function
(pe_out_zeroed,
ne_out_zeroed,
dqdv_over_v_real,
dqdv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dqdv_over_v_from_degradation_matching_ah(x, *params)
xa = dqdv_over_v_real[['Voltage_aligned', 'dQdV']].dropna()
xb = dqdv_over_v_emulated[['Voltage_aligned', 'dQdV']].dropna()
error_matrix = distance.cdist(xa, xb, 'seuclidean')
error_vector = error_matrix.min(axis=1)
error = error_vector.mean()
except ValueError:
error = 100
return error, None, None, None
return error, error_vector, xa, xb
def _get_error_from_degradation_matching_dvdq(self, x, *params):
"""
Error function returning the mean standardized Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dVdQ representation.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error (float): output of the specified error sub function
error_vector (array): vector containing Euclidean distance of each point of the real curve to
the closest value on the emulated curve in the dVdQ representation
xa (Dataframe): real full cell data used for error analysis
xb (Dataframe): emulated full cell data used for error analysis
"""
try:
(pe_out_zeroed,
ne_out_zeroed,
dvdq_over_q_real,
dvdq_over_q_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dvdq_over_q_from_degradation_matching_ah(x, *params)
xa = dvdq_over_q_real[['Q_aligned', 'dVdQ']].dropna()
xb = dvdq_over_q_emulated[['Q_aligned', 'dVdQ']].dropna()
# down-select to values with capacity more than 0.5 Ahr to eliminate high-slope region of dVdQ
xa = xa.loc[(xa.Q_aligned > 0.5)]
xb = xb.loc[(xb.Q_aligned > 0.5)]
error_matrix = distance.cdist(xa, xb, 'seuclidean')
error_vector = error_matrix.min(axis=1)
error = error_vector.mean()
except ValueError:
error = 100
return error, None, None, None
return error, error_vector, xa, xb
def _get_error_from_synthetic_fitting_ah(self, x, *params):
"""
Wrapper function which selects the correct error sub routine and returns its error value.
This function is specific to fitting synthetic data rather than real cycling data.
Inputs:
x (list): [LLI, q_pe, q_ne, x_ne_2]
*params:
pe_pristine (Dataframe): half cell data of the pristine (uncycled) positive
electrode
ne_pristine (Dataframe): half cell data of the pristine (uncycled) negative
electrode
ne_2_pos (Dataframe): half cell data for the positive component of the anode
ne_2_neg (Dataframe): half cell data for the negative component of the anode
real_cell_candidate_charge_profile_aligned (Dataframe): columns Q_aligned
(evenly spaced) and Voltage_aligned
Outputs:
error value (float) - output of the specified error sub function
"""
error_type = self.error_type
try:
if error_type == 'V-Q':
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
elif error_type == 'dVdQ':
return self._get_error_from_degradation_matching_dvdq(x, *params)[0]
elif error_type == 'dQdV':
return self._get_error_from_degradation_matching_dqdv(x, *params)[0]
else:
return self._get_error_from_degradation_matching_v_q(x, *params)[0]
except RuntimeError:
print("Can't return error")
return 100
def intracell_values_wrapper_ah(self,
cycle_index,
cell_struct,
degradation_bounds=None
):
"""
Wrapper function to solve capacity sizing and offset of reference electrodes to real full cell cycle data.
Inputs:
cycle_index (int): the index of the cycle of interest of the structured real cycling data
cell_struct (MaccorDatapath): BEEP structured cycling data
Outputs:
loss_dict (dict): dictionary with key of cycle index and entry of a list of
error, lli_opt, q_pe_opt, q_ne_opt, x_ne_2, Q_li
profiles_dict (dict): dictionary with key of cycle index and entry of a dictionary
containing various key/entry pairs resulting from the fitting
"""
if degradation_bounds is None:
degradation_bounds = ((0, 3), # LLI
(2.5, 6.5), # q_pe
(2.5, 6.5), # q_ne
(1, 1), # (-1,1) x_ne_2
)
real_cell_candidate_charge_profile_aligned = self.process_beep_cycle_data_for_candidate_halfcell_analysis_ah(
cell_struct,
cycle_index)
degradation_optimization_result = differential_evolution(self._get_error_from_degradation_matching_ah,
degradation_bounds,
args=(self.pe_pristine,
self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned
),
strategy='best1bin', maxiter=100000,
popsize=15, tol=0.001, mutation=0.5,
recombination=0.7,
seed=1,
callback=None, disp=False, polish=True,
init='latinhypercube',
atol=0, updating='deferred', workers=-1,
constraints=()
)
(pe_out_zeroed,
ne_out_zeroed,
dqdv_over_v_real,
dqdv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dqdv_over_v_from_degradation_matching_ah(
degradation_optimization_result.x,
self.pe_pristine,
self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned)
#
electrode_info_df = get_electrode_info_ah(pe_out_zeroed, ne_out_zeroed)
#
error = degradation_optimization_result.fun
lli_opt = degradation_optimization_result.x[0]
q_pe_opt = degradation_optimization_result.x[1]
q_ne_opt = degradation_optimization_result.x[2]
x_ne_2 = degradation_optimization_result.x[3]
loss_dict = {cycle_index: np.append([error, lli_opt, q_pe_opt, q_ne_opt,
x_ne_2],
electrode_info_df.iloc[-1].values)
}
profiles_per_cycle_dict = {
'NE_zeroed': ne_out_zeroed,
'PE_zeroed': pe_out_zeroed,
'dQdV_over_v_real': dqdv_over_v_real,
'dQdV_over_v_emulated': dqdv_over_v_emulated,
'df_real_interped': df_real_interped,
'emulated_full_cell_interped': emulated_full_cell_interped,
'real_cell_candidate_charge_profile_aligned': real_cell_candidate_charge_profile_aligned
}
profiles_dict = {cycle_index: profiles_per_cycle_dict}
return loss_dict, profiles_dict
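# Illustrative sketch (not part of the original class): how differential_evolution is
# driven above. A degradation parameter can be pinned by giving it identical lower and
# upper bounds, e.g. (1, 1) for x_ne_2, and workers=-1 requires updating='deferred' as
# used in the calls above. The quadratic toy objective and target vector below are
# hypothetical, chosen only to show the calling convention.
def _differential_evolution_sketch():
    def toy_objective(x, target):
        return float(np.sum((np.asarray(x) - np.asarray(target)) ** 2))

    bounds = ((0, 3),      # LLI
              (2.5, 6.5),  # q_pe
              (2.5, 6.5),  # q_ne
              (1, 1))      # x_ne_2 held fixed by equal bounds
    return differential_evolution(toy_objective, bounds,
                                  args=([1.0, 4.0, 4.0, 1.0],),
                                  seed=1, polish=True)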
def solve_emulated_degradation(self,
forward_simulated_profile,
degradation_bounds=None
):
"""
"""
if degradation_bounds is None:
degradation_bounds = ((0, 3), # LLI
(2.5, 6.5), # q_pe
(2.5, 6.5), # q_ne
(1, 1), # (-1,1) x_ne_2
)
degradation_optimization_result = differential_evolution(self._get_error_from_synthetic_fitting_ah,
degradation_bounds,
args=(self.pe_pristine,
self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
forward_simulated_profile,
),
strategy='best1bin', maxiter=100000,
popsize=15, tol=0.001, mutation=0.5,
recombination=0.7,
seed=1,
callback=None, disp=False, polish=True,
init='latinhypercube',
atol=0, updating='deferred', workers=-1,
constraints=()
)
return degradation_optimization_result
# TODO revisit this function
def blend_electrodes(electrode_1, electrode_2_pos, electrode_2_neg, x_2):
"""
Blends two electrode materials from their SOC-V profiles to form a blended electrode.
Inputs:
electrode_1 (Dataframe): Primary material in the electrode, typically graphite. Supplied with
evenly spaced SOC_aligned and a Voltage_aligned column.
electrode_2_pos (Dataframe): Secondary material (e.g. Si) in the same format as electrode_1;
used when x_2 is positive (or when electrode_2_neg is empty).
electrode_2_neg (Dataframe): Secondary material in the same format as electrode_1; used when
x_2 is negative.
x_2 (float): Fraction of the secondary material's capacity (not mass). Supplied as a scalar value.
Outputs:
df_blended_soc_mod (Dataframe): blended electrode with SOC_aligned and Voltage_aligned columns
"""
if electrode_2_pos.empty:
df_blended = electrode_1
return df_blended
if electrode_2_neg.empty:
electrode_2 = electrode_2_pos
x_2 = np.abs(x_2)
elif x_2 > 0:
electrode_2 = electrode_2_pos
else:
electrode_2 = electrode_2_neg
x_2 = np.abs(x_2)
electrode_1_interper = interp1d(electrode_1['Voltage_aligned'], electrode_1['SOC_aligned'], bounds_error=False,
fill_value='extrapolate')
electrode_2_interper = interp1d(electrode_2['Voltage_aligned'], electrode_2['SOC_aligned'], bounds_error=False,
fill_value='extrapolate')
voltage_vec = np.linspace(np.min((np.min(electrode_1['Voltage_aligned']),
np.min(electrode_2['Voltage_aligned']))),
np.max((np.max(electrode_1['Voltage_aligned']),
np.max(electrode_2['Voltage_aligned']))),
1001)
electrode_1_voltage_aligned = pd.DataFrame(electrode_1_interper(voltage_vec), columns=['SOC'])
electrode_2_voltage_aligned = pd.DataFrame(electrode_2_interper(voltage_vec), columns=['SOC'])
electrode_1_voltage_aligned['Voltage'] = voltage_vec
electrode_2_voltage_aligned['Voltage'] = voltage_vec
df_blend_voltage_aligned = pd.DataFrame(
(1 - x_2) * electrode_1_voltage_aligned['SOC'] + x_2 * electrode_2_voltage_aligned['SOC'], columns=['SOC'])
df_blend_voltage_aligned['Voltage'] = electrode_1_voltage_aligned.merge(electrode_2_voltage_aligned,
on='Voltage')['Voltage']
df_blended_interper = interp1d(df_blend_voltage_aligned['SOC'], df_blend_voltage_aligned['Voltage'],
bounds_error=False)
soc_vec = np.linspace(0, 100, 1001)
df_blended = pd.DataFrame(df_blended_interper(soc_vec), columns=['Voltage_aligned'])
df_blended['SOC_aligned'] = soc_vec
# Modify NE to fully span 100% SOC within its valid voltage window
df_blended_soc_mod_interper = interp1d(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()],
df_blended['Voltage_aligned'].loc[~df_blended['Voltage_aligned'].isna()],
bounds_error=False)
soc_vec = np.linspace(np.min(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]),
np.max(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]),
1001)
df_blended_soc_mod = pd.DataFrame(df_blended_soc_mod_interper(soc_vec), columns=['Voltage_aligned'])
df_blended_soc_mod['SOC_aligned'] = soc_vec / np.max(soc_vec) * 100
return df_blended_soc_mod
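# Usage sketch for blend_electrodes (illustration only; the toy half-cell curves below are
# made up and assume the module's numpy/pandas imports). Passing an empty DataFrame for
# electrode_2_neg selects the positive-fraction branch, and x_2 = 0.1 blends in 10% of the
# secondary component by capacity fraction.
def _blend_electrodes_demo():
    soc = np.linspace(0, 100, 1001)
    gr_like = pd.DataFrame({'SOC_aligned': soc,
                            'Voltage_aligned': 0.2 + 0.6 * np.exp(-soc / 25.0)})
    si_like = pd.DataFrame({'SOC_aligned': soc,
                            'Voltage_aligned': 0.1 + 0.5 * (1.0 - soc / 100.0)})
    return blend_electrodes(gr_like, si_like, pd.DataFrame(), 0.1)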
def get_electrode_info_ah(pe_out_zeroed, ne_out_zeroed):
"""
Calculates a variety of half-cell metrics at various positions in the full-cell profile.
Inputs:
pe_out_zeroed (Dataframe): cathode capacity and voltage columns scaled,
offset, and aligned along capacity
ne_out_zeroed (Dataframe): anode capacity and voltage columns scaled,
offset, and aligned along capacity
Outputs:
electrode_info_df (Dataframe): dataframe containing a variety of half-cell metrics
at various positions in the emulated full-cell profile.
pe_voltage_FC4p2V: voltage of the positive electrode (cathode) corresponding
to the full cell at 4.2V
...
pe_voltage_FC2p7V: voltage of the positive electrode (cathode) corresponding
to the full cell at 2.7V
pe_soc_FC4p2V: state of charge of the positive electrode corresponding
to the full cell at 4.2V
...
pe_soc_FC2p7V: state of charge of the positive electrode corresponding
to the full cell at 2.7V
ne_voltage_FC4p2V: voltage of the negative electrode (anode) corresponding
to the full cell at 4.2V
...
ne_voltage_FC2p7V: voltage of the negative electrode (anode) corresponding
to the full cell at 2.7V
ne_soc_FC4p2V: state of charge of the anode electrode corresponding
to the full cell at 4.2V
...
ne_soc_FC2p7V: state of charge of the anode electrode corresponding
to the full cell at 2.7V
Q_fc: capacity of the full cell within the full cell voltage limits [Ahr]
q_pe: capacity of the cathode [Ahr]
q_ne: capacity of the anode [Ahr]
Q_li
"""
pe_minus_ne_zeroed = pd.DataFrame(pe_out_zeroed['Voltage_aligned'] - ne_out_zeroed['Voltage_aligned'],
columns=['Voltage_aligned'])
pe_minus_ne_zeroed['Q_aligned'] = pe_out_zeroed['Q_aligned']
electrode_info_df = pd.DataFrame(index=[0])
electrode_info_df['pe_voltage_FC4p2V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.2))].Voltage_aligned
electrode_info_df['pe_voltage_FC4p1V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.1))].Voltage_aligned
electrode_info_df['pe_voltage_FC4p0V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.0))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p9V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.9))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p8V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.8))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p7V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.7))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p6V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.6))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p5V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.5))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p4V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.4))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p3V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.3))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p2V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.2))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p1V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.1))].Voltage_aligned
electrode_info_df['pe_voltage_FC3p0V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.0))].Voltage_aligned
electrode_info_df['pe_voltage_FC2p9V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 2.9))].Voltage_aligned
electrode_info_df['pe_voltage_FC2p8V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 2.8))].Voltage_aligned
electrode_info_df['pe_voltage_FC2p7V'] = pe_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 2.7))].Voltage_aligned
electrode_info_df['pe_soc_FC4p2V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.2))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 4.2V
electrode_info_df['pe_soc_FC4p1V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.1))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 4.1V
electrode_info_df['pe_soc_FC4p0V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.0))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 4.0V
electrode_info_df['pe_soc_FC3p9V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.9))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.9V
electrode_info_df['pe_soc_FC3p8V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.8))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.8V
electrode_info_df['pe_soc_FC3p7V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.7))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.7V
electrode_info_df['pe_soc_FC3p6V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.6))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.6V
electrode_info_df['pe_soc_FC3p5V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.5))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.5V
electrode_info_df['pe_soc_FC3p4V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.4))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.4V
electrode_info_df['pe_soc_FC3p3V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.3))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.3V
electrode_info_df['pe_soc_FC3p2V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.2))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.2V
electrode_info_df['pe_soc_FC3p1V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.1))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.1V
electrode_info_df['pe_soc_FC3p0V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.0))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 3.0V
electrode_info_df['pe_soc_FC2p9V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 2.9))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 2.9V
electrode_info_df['pe_soc_FC2p8V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 2.8))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 2.8V
electrode_info_df['pe_soc_FC2p7V'] = (
(pe_out_zeroed.loc[np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 2.7))].Q_aligned -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()])) / (
np.max(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]) -
np.min(pe_out_zeroed['Q_aligned'].loc[~pe_out_zeroed['Voltage_aligned'].isna()]))
) # 2.7V
electrode_info_df['ne_voltage_FC4p2V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.2))].Voltage_aligned
electrode_info_df['ne_voltage_FC4p1V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.1))].Voltage_aligned
electrode_info_df['ne_voltage_FC4p0V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 4.0))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p9V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.9))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p8V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.8))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p7V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.7))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p6V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.6))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p5V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.5))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p4V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.4))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p3V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.3))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p2V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.2))].Voltage_aligned
electrode_info_df['ne_voltage_FC3p1V'] = ne_out_zeroed.loc[
np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - 3.1))].Voltage_aligned
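# Sketch (not part of the original module): the per-voltage lookups above repeat the same
# nearest-voltage indexing for each full-cell setpoint from 4.2 V down to 2.7 V. The
# helpers below show how the same metrics could be generated in a loop, assuming the same
# pe_out_zeroed / ne_out_zeroed / pe_minus_ne_zeroed frames as in get_electrode_info_ah;
# they are illustrative only (column ordering differs from the hand-written version).
def _soc_at_full_cell_voltage(half_df, pe_minus_ne_zeroed, v_fc):
    idx = np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - v_fc))
    q = half_df.loc[idx].Q_aligned
    q_valid = half_df['Q_aligned'].loc[~half_df['Voltage_aligned'].isna()]
    return (q - np.min(q_valid)) / (np.max(q_valid) - np.min(q_valid))

def _electrode_info_loop(pe_out_zeroed, ne_out_zeroed, pe_minus_ne_zeroed):
    info = pd.DataFrame(index=[0])
    for v_fc in np.linspace(4.2, 2.7, 16):
        tag = 'FC{:.1f}V'.format(v_fc).replace('.', 'p')  # e.g. 'FC4p2V'
        idx = np.argmin(np.abs(pe_minus_ne_zeroed.Voltage_aligned - v_fc))
        info['pe_voltage_' + tag] = pe_out_zeroed.loc[idx].Voltage_aligned
        info['ne_voltage_' + tag] = ne_out_zeroed.loc[idx].Voltage_aligned
        info['pe_soc_' + tag] = _soc_at_full_cell_voltage(pe_out_zeroed, pe_minus_ne_zeroed, v_fc)
        info['ne_soc_' + tag] = _soc_at_full_cell_voltage(ne_out_zeroed, pe_minus_ne_zeroed, v_fc)
    return info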
import os, sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import numpy as np
import pytest
import librosa
import soundpy as sp
test_dir = 'test_audio/'
test_audiofile = '{}audio2channels.wav'.format(test_dir)
test_traffic = '{}traffic.wav'.format(test_dir)
test_python = '{}python.wav'.format(test_dir)
test_horn = '{}car_horn.wav'.format(test_dir)
samples_48000, sr_48000 = librosa.load(test_audiofile, sr=48000)
samples_44100, sr_44100 = librosa.load(test_audiofile, sr=44100)
samples_22050, sr_22050 = librosa.load(test_audiofile, sr=22050)
samples_16000, sr_16000 = librosa.load(test_audiofile, sr=16000)
samples_8000, sr_8000 = librosa.load(test_audiofile, sr=8000)
def test_shape_samps_channels_mono():
input_data = np.array([1,2,3,4,5])
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_correct():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(5,2)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_incorrect():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(2,5)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data.T, output_data)
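# Minimal sketch of the behaviour these tests assume for sp.dsp.shape_samps_channels:
# audio is returned shaped (num_samples, num_channels), transposing inputs that look
# like (num_channels, num_samples). This is an illustration of the expected contract,
# not the actual soundpy implementation.
def _shape_samps_channels_sketch(data):
    if data.ndim == 1:
        return data                      # mono: leave as-is
    if data.shape[0] < data.shape[1]:
        return data.T                    # (channels, samples) -> (samples, channels)
    return data                          # already (samples, channels)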
##############################################################################
# This file is a part of PFFDTD.
#
# PFFTD is released under the MIT License.
# For details see the LICENSE file.
#
# Copyright 2021 <NAME>.
#
# File name: vox_grid_base.py
#
# Description: Class for a voxel-grid for ray-tri / tri-box intersections
# Uses multiprocessing
#
##############################################################################
import numpy as np
from numpy import array as npa
from common.timerdict import TimerDict
from common.tri_box_intersection import tri_box_intersection_vec
from common.room_geo import RoomGeo
from common.tris_precompute import tris_precompute
import multiprocessing as mp
from common.myfuncs import clear_dat_folder,clear_console
from common.myfuncs import get_default_nprocs
from tqdm import tqdm
import common.check_version as cv
import time
assert cv.ATLEASTVERSION38 #for shared memory (but project needs 3.9 anyway)
from multiprocessing import shared_memory
#base class for a voxel
class VoxBase:
def __init__(self,bmin,bmax):
self.bmin = bmin
self.bmax = bmax
self.tri_idxs = [] #triangle indices as list
self.tris_pre = None
self.tris_mat = None
#base class for a voxel grid
class VoxGridBase:
def __init__(self,room_geo):
tris = room_geo.tris
pts = room_geo.pts
tris_pre = room_geo.tris_pre
mats = room_geo.mat_ind
assert tris.ndim == 2
assert pts.ndim == 2
assert tris.shape[0] > tris.shape[1]
assert pts.shape[0] > pts.shape[1]
Npts = pts.shape[0]
Ntris = tris.shape[0]
self.tris = tris
self.tris_pre = tris_pre
self.mats = mats
self.pts = pts
self.Npts = Npts
self.Ntris = Ntris
self.voxels = []
self.nonempty_idx = []
self.timer = TimerDict()
self.nprocs = get_default_nprocs()
#fill the grid (primarily using tri-box intersections)
def fill(self,Nprocs=None):
if Nprocs is None:
Nprocs = self.nprocs
self.print(f'using {Nprocs} processes')
tris = self.tris
tris_pre = self.tris_pre
Ntris = self.Ntris
pts = self.pts
Nvox = self.Nvox
self.timer.tic('voxgrid fill')
tri_pts = tris_pre['v']
tri_bmin = tris_pre['bmin']
tri_bmax = tris_pre['bmax']
if Nvox==1:
vox = self.voxels[0]
vox.tri_idxs = np.arange(Ntris)
vox.tris_pre = self.tris_pre
vox.tris_mat = self.mats
self.nonempty_idx = [0]
else:
if Nprocs>1:
clear_dat_folder('mmap_dat')
#create shared memory
Ntris_vox_shm = shared_memory.SharedMemory(create=True,size=Nvox*np.dtype(np.int64).itemsize)
Ntris_vox = np.frombuffer(Ntris_vox_shm.buf, dtype=np.int64)
#alternative syntax
#Ntris_vox = np.ndarray((Nvox,), dtype=np.int64, buffer=Ntris_vox_shm.buf)
#use as buffer view to np array
N_tribox_tests_shm = shared_memory.SharedMemory(create=True,size=Nvox*np.dtype(np.int64).itemsize)
N_tribox_tests = np.frombuffer(N_tribox_tests_shm.buf, dtype=np.int64)
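# Minimal sketch (standalone illustration, not part of VoxGridBase) of the shared-memory
# pattern used above: back a NumPy array with a SharedMemory block so worker processes can
# fill it, copy the result out, then release and unlink the block.
def _shared_counter_demo(nvox=8):
    shm = shared_memory.SharedMemory(create=True, size=nvox * np.dtype(np.int64).itemsize)
    counts = np.frombuffer(shm.buf, dtype=np.int64)
    counts[:] = 0                 # writes go straight into the shared block
    counts[3] += 7
    result = counts.copy()        # copy out before tearing the mapping down
    del counts                    # drop the array view so the buffer can be released
    shm.close()
    shm.unlink()
    return result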
###
# regular_turbo.py
# This file contains the defintions for processing the cars
# on the regular and the turbo roundabout.
###
import numpy as np
from utils import (DEBUG, random_row, NORTH, EAST, SOUTH, WEST)
from numpy.random import RandomState
import sys
def exception_handling(self, car):
"""Handles special cases for turning cars, depending on the type of roundabout.
Arguments:
car {Car} -- The car that should drive.
Returns:
int -- The state of the position of the car, depending on the exception.
"""
if self.model.name == "Regular":
for i in range(0, 2):
if np.array_equal(car.cur_pos, self.exceptions[i]):
if car.orientation == SOUTH:
return 7
else:
return 5
for i in range(2, 4):
if np.array_equal(car.cur_pos, self.exceptions[i]):
if car.orientation == EAST:
return 7
else:
return 5
for i in range(4, 6):
if np.array_equal(car.cur_pos, self.exceptions[i]):
if car.orientation == NORTH:
return 7
else:
return 5
for i in range(6, 8):
if np.array_equal(car.cur_pos, self.exceptions[i]):
if car.orientation == WEST:
return 7
else:
return 5
elif self.model.name == "Turbo":
for i in range(2):
if np.array_equal(car.cur_pos, self.exceptions[i]):
if car.orientation == EAST:
return 7
else:
return 5
if np.array_equal(car.cur_pos, self.exceptions[2]):
if car.orientation == SOUTH:
return 7
else:
return 5
for i in range(3, 5):
if np.array_equal(car.cur_pos, self.exceptions[i]):
if car.orientation == WEST:
return 7
else:
return 5
if np.array_equal(car.cur_pos, self.exceptions[5]):
if car.orientation == NORTH:
return 7
else:
return 5
def process_cars_reg(self):
"""Process the cars.
The next step of a car depends on the state of the current cell it is on.
"""
self.cars_on_round = []
self.cars_not_round = []
# Define which cars are on the roundabout.
if not self.collision():
for car in self.cars:
if list(car.cur_pos) in self.model.area:
self.cars_on_round.append(car)
else:
self.cars_not_round.append(car)
# If there is a collision, print out the position of it and exit.
else:
cur_pos = [car.cur_pos for car in self.cars]
pos, count = np.unique(cur_pos, axis=0, return_counts=True)
print(pos[count > 1])
sys.exit('Cars overlap')
# Let the cars on the roundabout drive first.
for car in self.cars_on_round:
drive_roundabout(self, car)
for car in self.cars_not_round:
drive_outside(self, car)
def drive_roundabout(self, car, wait_ctr=2):
"""Let a car drive on the roundabout.
Arguments:
car {Car} -- The car that should drive.
"""
row, col = car.cur_pos
state = self.model.grid[row][col]
# Depending on the asshole factor, stand still for 3 time steps.
stand = RandomState().binomial(1, p=car.asshole_factor)
if stand or (car.asshole_ctr > 0 and car.asshole_ctr < 4):
car.asshole_ctr += 1
else:
# State 8 defines the exceptions
if state == 8:
state = exception_handling(self, car)
if state == 3:
if self.priority(car, car.look_left()):
car.turn_left()
car.drive()
elif state == 5:
if self.priority(car, car.orientation):
car.drive()
elif state == 6:
car.turn_ctr += 1
prob = car.turn_ctr * (1/4)
if prob > 1:
prob = 1
# Checks if car stands still for more than 3 turns
if car.prev_pos[1] == wait_ctr:
prob = 1-prob
car.prev_pos[1] = 0
self.waiting_cars += 1
turn = RandomState().binomial(1, p=prob)
if turn == 1:
if self.priority(car, car.look_right()):
car.turn_right()
car.drive()
else:
car.turn_ctr = 3
else:
if self.priority(car, car.orientation):
car.drive()
elif state == 7:
if self.priority(car, car.look_right()):
car.turn_right()
car.drive()
elif state == 9:
if car.switch_ctr == 0 and self.priority(car, car.look_right()):
if RandomState()
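# Sketch (illustration only) of the exit decision used in drive_roundabout above: the
# chance of leaving the roundabout ramps up by 1/4 per passed exit (capped at 1), is
# inverted once a car has been stuck for `wait_ctr` turns, and a single binomial draw
# decides whether the car actually turns off. These helper names are not in the original.
def _exit_probability(turn_ctr, stuck=False):
    prob = min(turn_ctr * 0.25, 1.0)
    if stuck:
        prob = 1.0 - prob
    return prob

def _decides_to_exit(turn_ctr, stuck=False, rng=None):
    if rng is None:
        rng = RandomState()
    return bool(rng.binomial(1, p=_exit_probability(turn_ctr, stuck)))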
import pytest
import sys
sys.path.insert(0,"..")
import autogenes as ag
import numpy as np
import pandas as pd
import anndata
from sklearn.svm import NuSVR
from sklearn import linear_model
from scipy.optimize import nnls
def test_unpack_bulk():
unpack_bulk = ag.main._Interface__unpack_bulk
arr = np.ones((3,))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import logging
import numpy
import sys
from collections import OrderedDict
from six.moves import zip
import numpy as np
import Ska.Numpy
from Chandra.Time import DateTime
import Ska.tdb
from . import units
MODULE = sys.modules[__name__]
logger = logging.getLogger('engarchive')
class NoValidDataError(Exception):
pass
class DataShapeError(Exception):
pass
def quality_index(dat, colname):
"""Return the index for `colname` in `dat`"""
colname = colname.split(':')[0]
return list(dat.dtype.names).index(colname)
def numpy_converter(dat):
return Ska.Numpy.structured_array(dat, colnames=dat.dtype.names)
def convert(dat, content):
# Zero-length file results in `dat is None`
if dat is None:
raise NoValidDataError
try:
converter = getattr(MODULE, content.lower())
except AttributeError:
converter = numpy_converter
return converter(dat)
def generic_converter(prefix=None, add_quality=False, aliases=None):
"""Convert an input FITS recarray assuming that it has a TIME column.
If ``add_prefix`` is set then add ``content_`` as a prefix
to the data column names. If ``add_quality`` is set then add a QUALITY
column with all values False.
"""
def _convert(dat):
colnames = dat.dtype.names
colnames_out = [x.upper() for x in colnames]
if aliases:
colnames_out = [aliases.get(x, x).upper() for x in colnames_out]
if prefix:
# Note to self: never change an enclosed reference, i.e. don't do
# prefix = prefix.upper() + '_'
# You will lose an hour again figuring this out if so.
PREFIX = prefix.upper() + '_'
colnames_out = [(x if x in ('TIME', 'QUALITY') else PREFIX + x)
for x in colnames_out]
arrays = [dat.field(x) for x in colnames]
if add_quality:
descrs = [(x,) + y[1:] for x, y in zip(colnames_out, dat.dtype.descr)]
quals = numpy.zeros((len(dat), len(colnames) + 1), dtype=bool)
descrs += [('QUALITY', bool, (len(colnames) + 1,))]
arrays += [quals]
else:
descrs = [(name, array.dtype.str, array.shape[1:])
for name, array in zip(colnames_out, arrays)]
return numpy.rec.fromarrays(arrays, dtype=descrs)
return _convert
def get_bit_array(dat, in_name, out_name, bit_index):
bit_indexes = [int(bi) for bi in bit_index.split(',')]
bit_index = max(bit_indexes)
if dat[in_name].shape[1] < bit_index:
raise DataShapeError('column {} has shape {} but need at least {}'
.format(in_name, dat[in_name].shape[1], bit_index + 1))
if len(bit_indexes) > 1:
mult = 1
out_array = np.zeros(len(dat), dtype=np.uint32) # no more than 32 bit indexes
for bit_index in reversed(bit_indexes):
# Note: require casting mult and 0 to uint32 because recent versions of numpy
# disallow in-place adding of int64 to uint32.
out_array += np.where(dat[in_name][:, bit_index], np.uint32(mult), np.uint32(0))
mult *= 2
else:
try:
tscs = Ska.tdb.msids[out_name].Tsc
scs = {tsc['LOW_RAW_COUNT']: tsc['STATE_CODE'] for tsc in tscs}
except (KeyError, AttributeError):
scs = ['OFF', 'ON ']
# CXC telemetry stores state code vals with trailing spaces so all match
# in length. Annoying, but reproduce this here for consistency so
# fetch Msid.raw_vals does the right thing.
max_len = max(len(sc) for sc in scs.values())
fmtstr = '{:' + str(max_len) + 's}'
scs = [fmtstr.format(val) for key, val in scs.items()]
out_array = np.where(dat[in_name][:, bit_index], scs[1], scs[0])
return out_array
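# Worked sketch (illustration only) of the multi-bit branch above: the bits listed in
# ``bit_index`` are packed most-significant-first, so a "3,4,5" selection with bit values
# (1, 0, 1) yields 1*4 + 0*2 + 1*1 = 5. The helper below assumes the selected bit columns
# have already been gathered into a 2-D boolean array.
def _pack_bits_demo(bit_columns):
    # bit_columns: (n_rows, n_selected_bits) boolean array, ordered most-significant first
    out = np.zeros(bit_columns.shape[0], dtype=np.uint32)
    mult = 1
    for j in reversed(range(bit_columns.shape[1])):
        # cast to uint32 to avoid in-place adding int64 into a uint32 array
        out += np.where(bit_columns[:, j], np.uint32(mult), np.uint32(0))
        mult *= 2
    return out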
def generic_converter2(msid_cxc_map, default_dtypes=None):
"""Convert an input FITS recarray assuming that it has a TIME column. Use the
``msid_cxc_map`` to define the list of output eng archive MSIDs (keys) and the
corresponding colnames in the CXC archive FITS file (values).
The CXC values can contain an optional bit specifier in the form <colname>:<N>
where N is the bit selector referenced from 0 as the leftmost bit.
:param msid_cxc_map: dict of out_name => in_name mapping
"""
def _convert(dat):
# Make quality bool array with entries for TIME, QUALITY, then all other cols
out_names = ['TIME', 'QUALITY'] + list(msid_cxc_map.keys())
out_quality = np.zeros(shape=(len(dat), len(out_names)), dtype=bool)
out_arrays = {'TIME': dat['TIME'],
'QUALITY': out_quality}
for out_name, in_name in msid_cxc_map.items():
if ':' in in_name:
in_name, bit_index = in_name.split(':')
out_array = get_bit_array(dat, in_name, out_name, bit_index)
quality = dat['QUALITY'][:, quality_index(dat, in_name)]
else:
if in_name in dat.dtype.names:
out_array = dat[in_name]
quality = dat['QUALITY'][:, quality_index(dat, in_name)]
else:
# Handle column that is intermittently available in `dat` by using the
# supplied default dtype. Quality is True (missing) everywhere.
out_array = np.zeros(shape=len(dat), dtype=default_dtypes[out_name])
quality = True
assert out_array.ndim == 1
out_arrays[out_name] = out_array
out_quality[:, out_names.index(out_name)] = quality
out = Ska.Numpy.structured_array(out_arrays, out_names)
return out
return _convert
orbitephem0 = generic_converter('orbitephem0', add_quality=True)
lunarephem0 = generic_converter('lunarephem0', add_quality=True)
solarephem0 = generic_converter('solarephem0', add_quality=True)
orbitephem1 = generic_converter('orbitephem1', add_quality=True)
lunarephem1 = generic_converter('lunarephem1', add_quality=True)
solarephem1 = generic_converter('solarephem1', add_quality=True)
angleephem = generic_converter(add_quality=True)
def parse_alias_str(alias_str, invert=False):
aliases = OrderedDict()
for line in alias_str.strip().splitlines():
cxcmsid, msid = line.split()[:2]
if invert:
aliases[msid] = cxcmsid
else:
aliases[cxcmsid] = msid
return aliases
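# Usage sketch for parse_alias_str (illustration only): each line contributes just its
# first two whitespace-separated tokens, so the trailing quoted description is ignored by
# ``line.split()[:2]``. The two lines below are taken from the 'sim_mrg' block that follows.
def _parse_alias_demo():
    return parse_alias_str("""
TLMUPDATE 3SEATMUP "Telemetry Update Flag"
TSCPOS    3TSCPOS  "TSC Position"
""")
# -> OrderedDict([('TLMUPDATE', '3SEATMUP'), ('TSCPOS', '3TSCPOS')])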
ALIASES = {'simdiag': """
RAMEXEC 3SDSWELF SEA CSC Executing from RAM
DSTACKPTR 3SDPSTKP SEA Data Stack Ptr
TSCEDGE 3SDTSEDG TSC Tab Edge Detection Flags
FAEDGE 3SDFAEDG FA Tab Edge Detection Flags
MJFTIME 3SDMAJFP Major Frame Period Time Measured by SEA
MRMDEST 3SDRMOVD Most Recent Motor Move Destination
TSCTABADC 3SDTSTSV TSC Tab Position Sensor A/D converter
FATABADC 3SDFATSV FA Tab Position Sensor A/D Converter
AGRNDADC 3SDAGV Analog Ground A/D Converter Reading
P15VADC 3SDP15V +15V Power Supply A/D Converter Reading
P5VADC 3SDP5V +5V Power Supply A/D Converter Reading
N15VADC 3SDM15V -15V Power Supply A/D Converter Reading
FLEXATEMPADC 3SDFLXAT Flexture A Thermistor A/D Converter
FLEXBTEMPADC 3SDFLXBT Flexture B Thermistor A/D Converter
FLEXCTEMPADC 3SDFLXCT Flexture C Thermistor A/D Converter
TSCMTRTEMPADC 3SDTSMT TSC Motor Thermistor A/D Converter
FAMTRTEMPADC 3SDFAMT FA Motor Thermistor A/D Converter
PSUTEMPADC 3SDPST SEA Power Supply Thermistor A/D Converter
BOXTEMPADC 3SDBOXT SEA Box Thermistor A/D Converter
RAMFAILADDR 3SDRMFAD RAM Most Recent detected Fail Address
TSCTABWID 3SDTSTBW TSC Most Recent detected Tab Width
FATABWID 3SDFATBW FA Most Recent detected Tab Width
SYNCLOSS 3SDSYRS Process Reset Due Synchronization Loss
WARMRESET 3SDWMRS Processor Warm Reset
TSCHISTO 3SDTSP TSC Most Recent PWM Histogram
FAHISTO 3SDFAP FA Most Recent PWM Histogram
INVCMDCODE 3SDINCOD SEA Invalid CommandCode
""",
'sim_mrg': """
TLMUPDATE 3SEATMUP "Telemetry Update Flag"
SEAIDENT 3SEAID "SEA Identification Flag"
SEARESET 3SEARSET "SEA Reset Flag"
PROMFAIL 3SEAROMF "SEA PROM Checksum Flag"
INVCMDGROUP 3SEAINCM "SEA Invalid Command Group Flag"
TSCMOVING 3TSCMOVE "TSC In Motion Flag"
FAMOVING 3FAMOVE "FA In Motion Flag"
FAPOS 3FAPOS "FA Position"
TSCPOS 3TSCPOS "TSC Position"
PWMLEVEL 3MRMMXMV "Max Power Motor Volt recent move"
LDRTMECH 3LDRTMEK "Last Detected Reference Mechanism Tab"
LDRTNUM 3LDRTNO "Last Detected Reference Tab Number"
LDRTRELPOS 3LDRTPOS "Last Detected Reference Relative Position"
FLEXATEMP 3FAFLAAT "Flexture A Temperature"
FLEXBTEMP 3FAFLBAT "Flexture B Temperature"
FLEXCTEMP 3FAFLCAT "Flexture C Temperature"
TSCMTRTEMP 3TRMTRAT "TSC Motor Temperature"
FAMTRTEMP 3FAMTRAT "FA Motor Temperature"
PSUTEMP 3FAPSAT "SEA Power Supply Temperature"
BOXTEMP 3FASEAAT "SEA Box Temperature"
STALLCNT 3SMOTSTL "SEA Motor Stall Counter"
TAB2AUTOPOS 3STAB2EN "SEA Tab 2 Auto Position Update Status"
MTRDRVRLY 3SMOTPEN "SEA Motor Driver Power Relay status"
MTRSELRLY 3SMOTSEL "SEA Motor Selection Relay Status"
HTRPWRRLY 3SHTREN "SEA Heater Power Relay Status"
RAMFAIL 3SEARAMF "SEA RAM Failure Detected Flag"
MTROVRCCNT 3SMOTOC "Motor Drive Overcurrent Counter"
PENDCMDCNT 3SPENDC "SEA Pending Command Count"
FLEXATSET 3SFLXAST "Flexture A Temperature Setpoint"
FLEXBTSET 3SFLXBST "Flexture B Temperature Setpoint"
FLEXCTSET 3SFLXCST "Flexture C Temperature Setpoint"
""",
'hrc0ss': """
TLEVART 2TLEV1RT
VLEVART 2VLEV1RT
SHEVART 2SHEV1RT
TLEVART 2TLEV2RT
VLEVART 2VLEV2RT
SHEVART 2SHEV2RT
""",
'hrc0hk': """
SCIDPREN:0,1,2,3,8,9,10 HRC_SS_HK_BAD
P24CAST:7 224PCAST
P15CAST:7 215PCAST
N15CAST:7 215NCAST
SPTPAST 2SPTPAST
SPBPAST 2SPBPAST
IMTPAST 2IMTPAST
IMBPAST 2IMBPAST
MTRSELCT:3 2NYMTAST
MTRSELCT:4 2PYMTAST
MTRSELCT:5 2CLMTAST
MTRSELCT:6 2DRMTAST
MTRSELCT:7 2ALMTAST
MTRSTATR:0 2MSMDARS
MTRSTATR:1 2MDIRAST
MTRSTATR:2 2MSNBAMD
MTRSTATR:3 2MSNAAMD
MTRSTATR:4 2MSLBAMD
MTRSTATR:5 2MSLAAMD
MTRSTATR:6 2MSPRAMD
MTRSTATR:7 2MSDRAMD
MTRCMNDR:0 2MCMDARS
MTRCMNDR:2 2MCNBAMD
MTRCMNDR:3 2MCNAAMD
MTRCMNDR:4 2MCLBAMD
MTRCMNDR:5 2MCLAAMD
MTRCMNDR:6 2MCPRAMD
MTRCMNDR:7 2MDRVAST
SCTHAST 2SCTHAST
MTRITMP:1 2SMOIAST
MTRITMP:2 2SMOTAST
MTRITMP:5 2DROTAST
MTRITMP:6 2DROIAST
MLSWENBL:3 2SFLGAST
MLSWENBL:4 2OSLSAST
MLSWENBL:5 2OPLSAST
MLSWENBL:6 2CSLSAST
MLSWENBL:7 2CPLSAST
MLSWSTAT:2 2OSLSADT
MLSWSTAT:3 2OSLSAAC
MLSWSTAT:4 2OPLSAAC
MLSWSTAT:5 2CSLSADT
MLSWSTAT:6 2CSLSAAC
MLSWSTAT:7 2CPLSAAC
FCPUAST 2FCPUAST
FCPVAST 2FCPVAST
CBHUAST 2CBHUAST
CBLUAST 2CBLUAST
CBHVAST 2CBHVAST
CBLVAST 2CBLVAST
WDTHAST 2WDTHAST
SCIDPREN:4 2CLMDAST
SCIDPREN:5 2FIFOAVR
SCIDPREN:6 2OBNLASL
SCIDPREN:7 2SPMDASL
SCIDPREN:11 2EBLKAVR
SCIDPREN:12 2CBLKAVR
SCIDPREN:13 2ULDIAVR
SCIDPREN:14 2WDTHAVR
SCIDPREN:15 2SHLDAVR
HVPSSTAT:0 2SPONST
HVPSSTAT:1 2SPCLST
HVPSSTAT:2 2S1ONST
HVPSSTAT:3 2IMONST
HVPSSTAT:4 2IMCLST
HVPSSTAT:5 2S2ONST
S1HVST 2S1HVST
S2HVST 2S2HVST
C05PALV 2C05PALV
C15PALV 2C15PALV
C15NALV 2C15NALV
C24PALV 2C24PALV
IMHVLV 2IMHVLV
IMHBLV 2IMHBLV
SPHVLV 2SPHVLV
SPHBLV 2SPHBLV
S1HVLV 2S1HVLV
S2HVLV 2S2HVLV
PRBSCR 2PRBSCR
PRBSVL 2PRBSVL
ULDIALV 2ULDIALV
LLDIALV 2LLDIALV
FEPRATM 2FEPRATM
CALPALV 2CALPALV
GRDVALV 2GRDVALV
RSRFALV 2RSRFALV
SPINATM 2SPINATM
IMINATM 2IMINATM
LVPLATM 2LVPLATM
SPHVATM 2SPHVATM
IMHVATM 2IMHVATM
SMTRATM 2SMTRATM
FE00ATM 2FE00ATM
CE00ATM 2CE00ATM
CE01ATM 2CE01ATM
""",
}
CXC_TO_MSID = {key: parse_alias_str(val) for key, val in ALIASES.items()}
MSID_TO_CXC = {key: parse_alias_str(val, invert=True) for key, val in ALIASES.items()}
def sim_mrg(dat):
"""
Custom converter for SIM_MRG.
There is a bug in CXCDS L0 SIM decom wherein the 3LDRTMEK MSID is
incorrectly assigned (TSC and FA are reversed). The calibration
of 3LDRTPOS from steps to mm is then also wrong because it uses
the FA conversion instead of TSC.
This function fixes 3LDRTMEK, then backs out the (incorrect) 3LDRTPOS
steps to mm conversion and re-does it correctly using the TSC conversion.
Note that 3LDRTMEK is (by virtue of the way mission operations run)
always "TSC".
"""
# Start with the generic converter
out = generic_converter(aliases=CXC_TO_MSID['sim_mrg'])(dat)
# Now do the fixes. FOT mech has stated that 3LDRTMEK is always 'FA'
# in practice.
bad = out['3LDRTMEK'] == b'FA '
if np.count_nonzero(bad):
out['3LDRTMEK'][bad] = b'TSC'
pos_tsc_steps = units.converters['mm', 'FASTEP'](out['3LDRTPOS'][bad])
out['3LDRTPOS'][bad] = units.converters['TSCSTEP', 'mm'](pos_tsc_steps)
return out
simdiag = generic_converter(aliases=CXC_TO_MSID['simdiag'])
hrc0ss = generic_converter2(MSID_TO_CXC['hrc0ss'])
def hrc0hk(dat):
# Read the data and allow for missing columns in input L0 HK file.
default_dtypes = {'2CE00ATM': 'f4',
'2CE01ATM': 'f4'}
out = generic_converter2(MSID_TO_CXC['hrc0hk'], default_dtypes)(dat)
# Set all HRC HK data columns to bad quality where HRC_SS_HK_BAD is not zero
# First three columns are TIME, QUALITY, and HRC_SS_HK_BAD -- do not filter these.
bad = out['HRC_SS_HK_BAD'] > 0
if np.any(bad):
out['QUALITY'][bad, 3:] = True
logger.info('Setting {} readouts of all HRC HK telem to bad quality (bad SCIDPREN)'
.format(np.count_nonzero(bad)))
# Detect the secondary-science byte-shift anomaly by finding out-of-range 2SMTRATM values.
# For those bad frames:
# - Set bit 10 (from LSB) of HRC_SS_HK_BAD
# - Set all analog MSIDs (2C05PALV and later in the list) to bad quality
bad = (out['2SMTRATM'] < -20) | (out['2SMTRATM'] > 50)
if np.any(bad):
out['HRC_SS_HK_BAD'][bad] |= 2 ** 10 # 1024
analogs_index0 = list(out.dtype.names).index('2C05PALV')
out['QUALITY'][bad, analogs_index0:] = True
logger.info('Setting {} readouts of analog HRC HK telem to bad quality (bad 2SMTRATM)'
.format(np.count_nonzero(bad)))
return out
def obc4eng(dat):
"""
At 2014:342:XX:XX:XX, patch PR-361 was applied which transitioned 41 OBA thermistors to
read out in wide-mode. After this time the data in the listed OOBTHRxx MSIDs became
invalid while the OOBTHRxx_WIDE MSIDs became valid. This converter simply copies the
*_WIDE values to the original MSIDs after the time of patch activation. The *_WIDE
MSIDs are not available in the eng archive (by the _WIDE names).
"""
# MSIDs OOBTHR<msid_num> that went to _WIDE after the patch, which was done in parts A
# and B.
msid_nums = {'a': '08 09 10 11 12 13 14 15 17 18 19 20 21 22 23 24 25 26 27 28 29'.split(),
'b': '30 31 33 34 35 36 37 38 39 40 41 44 45 46 49 50 51 52 53 54'.split(),
'c': '02 03 04 05 06 07'.split()
}
# Convert using the baseline converter
out = numpy_converter(dat)
# The patch times below correspond to roughly the middle of the major frame where
# patches A and B were applied, respectively.
patch_times = {'a': DateTime('2014:342:16:29:30').secs,
'b': DateTime('2014:342:16:32:45').secs,
'c': DateTime('2017:312:16:11:16').secs}
for patch in ('a', 'b', 'c'):
# Set a mask defining times after the activation of wide-range telemetry in PR-361
mask = out['TIME'] > patch_times[patch]
if np.any(mask):
import numpy as np
import openmdao.api as om
import wisdem.commonse.utilities as util
import wisdem.pyframe3dd.pyframe3dd as pyframe3dd
import wisdem.commonse.utilization_dnvgl as util_dnvgl
import wisdem.commonse.utilization_constraints as util_con
from wisdem.commonse import NFREQ, gravity
from wisdem.floatingse.member import NULL, MEMMAX, Member
NNODES_MAX = 1000
NELEM_MAX = 1000
RIGID = 1e30
EPS = 1e-6
class PlatformFrame(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
for k in range(n_member):
self.add_input(f"member{k}:nodes_xyz", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_input(f"member{k}:nodes_r", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_D", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_t", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
self.add_input(f"member{k}:section_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:section_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:section_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:idx_cb", 0)
self.add_input(f"member{k}:buoyancy_force", 0.0, units="N")
self.add_input(f"member{k}:displacement", 0.0, units="m**3")
self.add_input(f"member{k}:center_of_buoyancy", np.zeros(3), units="m")
self.add_input(f"member{k}:center_of_mass", np.zeros(3), units="m")
self.add_input(f"member{k}:ballast_mass", 0.0, units="kg")
self.add_input(f"member{k}:total_mass", 0.0, units="kg")
self.add_input(f"member{k}:total_cost", 0.0, units="USD")
self.add_input(f"member{k}:I_total", np.zeros(6), units="kg*m**2")
self.add_input(f"member{k}:Awater", 0.0, units="m**2")
self.add_input(f"member{k}:Iwater", 0.0, units="m**4")
self.add_input(f"member{k}:added_mass", np.zeros(6), units="kg")
self.add_input(f"member{k}:waterline_centroid", np.zeros(2), units="m")
self.add_input(f"member{k}:variable_ballast_capacity", val=0.0, units="m**3")
self.add_input(f"member{k}:Px", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:Py", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:Pz", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:qdyn", np.zeros(MEMMAX), units="Pa")
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("transition_piece_mass", 0.0, units="kg")
self.add_input("transition_piece_cost", 0.0, units="USD")
self.add_output("transition_piece_I", np.zeros(6), units="kg*m**2")
self.add_output("platform_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_output("platform_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_output("platform_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_output("platform_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("platform_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("platform_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("platform_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("platform_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_output("platform_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_discrete_output("platform_elem_memid", [-1] * NELEM_MAX)
self.add_output("platform_displacement", 0.0, units="m**3")
self.add_output("platform_center_of_buoyancy", np.zeros(3), units="m")
self.add_output("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_output("platform_centroid", np.zeros(3), units="m")
self.add_output("platform_ballast_mass", 0.0, units="kg")
self.add_output("platform_hull_mass", 0.0, units="kg")
self.add_output("platform_mass", 0.0, units="kg")
self.add_output("platform_I_hull", np.zeros(6), units="kg*m**2")
self.add_output("platform_cost", 0.0, units="USD")
self.add_output("platform_Awater", 0.0, units="m**2")
self.add_output("platform_Iwater", 0.0, units="m**4")
self.add_output("platform_added_mass", np.zeros(6), units="kg")
self.add_output("platform_variable_capacity", np.zeros(n_member), units="m**3")
self.node_mem2glob = {}
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Seems like we have to run this each time as numbering can change during optimization
self.node_mem2glob = {}
self.set_connectivity(inputs, outputs)
self.set_node_props(inputs, outputs)
self.set_element_props(inputs, outputs, discrete_inputs, discrete_outputs)
def set_connectivity(self, inputs, outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Initialize running lists across all members
nodes_temp = np.empty((0, 3))
elem_n1 = np.array([], dtype=np.int_)
elem_n2 = np.array([], dtype=np.int_)
# Look over members and grab all nodes and internal connections
for k in range(n_member):
inode_xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(inode_xyz[:, 0] == NULL)[0][0]
inode_xyz = inode_xyz[:inodes, :]
inode_range = np.arange(inodes - 1)
n = nodes_temp.shape[0]
for ii in range(inodes):
self.node_mem2glob[(k, ii)] = n + ii
elem_n1 = np.append(elem_n1, n + inode_range)
elem_n2 = np.append(elem_n2, n + inode_range + 1)
nodes_temp = np.append(nodes_temp, inode_xyz, axis=0)
# Reveal connectivity by using mapping to unique node positions
nodes, idx, inv = np.unique(nodes_temp.round(8), axis=0, return_index=True, return_inverse=True)
nnode = nodes.shape[0]
outputs["platform_nodes"] = NULL * np.ones((NNODES_MAX, 3))
outputs["platform_nodes"][:nnode, :] = nodes
outputs["platform_centroid"] = nodes.mean(axis=0)
# Use mapping to set references to node joints
nelem = elem_n1.size
outputs["platform_elem_n1"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["platform_elem_n2"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["platform_elem_n1"][:nelem] = inv[elem_n1]
outputs["platform_elem_n2"][:nelem] = inv[elem_n2]
# Update global 2 member mappings
for k in self.node_mem2glob.keys():
self.node_mem2glob[k] = inv[self.node_mem2glob[k]]
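# Sketch (illustration only, not part of the original class) of the node de-duplication
# used in set_connectivity above: np.unique with return_inverse maps every original,
# possibly repeated, node row to its row in the unique node table, and the same inverse
# mapping renumbers the element end points. The coordinates below are made up.
def _dedup_nodes_demo():
    nodes_temp = np.array([[0.0, 0.0, 0.0],
                           [1.0, 0.0, 0.0],
                           [1.0, 0.0, 0.0],   # duplicate of node 1
                           [2.0, 0.0, 0.0]])
    elem_n1 = np.array([0, 2])                # element end points into nodes_temp
    elem_n2 = np.array([1, 3])
    nodes, idx, inv = np.unique(nodes_temp.round(8), axis=0,
                                return_index=True, return_inverse=True)
    # 3 unique nodes remain and both elements are renumbered onto them
    return nodes, inv[elem_n1], inv[elem_n2]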
def set_node_props(self, inputs, outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Number of valid nodes
node_platform = outputs["platform_nodes"]
nnode = np.where(node_platform[:, 0] == NULL)[0][0]
node_platform = node_platform[:nnode, :]
# Find greatest radius of all members at node intersections
Rnode = np.zeros(nnode)
for k in range(n_member):
irnode = inputs[f"member{k}:nodes_r"]
n = np.where(irnode == NULL)[0][0]
for ii in range(n):
iglob = self.node_mem2glob[(k, ii)]
Rnode[iglob] = np.array([Rnode[iglob], irnode[ii]]).max()
# Find forces on nodes
Fnode = np.zeros((nnode, 3))
for k in range(n_member):
icb = int(inputs[f"member{k}:idx_cb"])
iglob = self.node_mem2glob[(k, icb)]
Fnode[iglob, 2] += inputs[f"member{k}:buoyancy_force"]
# Get transition piece inertial properties
itrans_platform = util.closest_node(node_platform, inputs["transition_node"])
m_trans = float(inputs["transition_piece_mass"])
r_trans = Rnode[itrans_platform]
I_trans = m_trans * r_trans ** 2.0 * np.r_[0.5, 0.5, 1.0, np.zeros(3)]
outputs["transition_piece_I"] = I_trans
# Store outputs
outputs["platform_Rnode"] = NULL * np.ones(NNODES_MAX)
outputs["platform_Rnode"][:nnode] = Rnode
outputs["platform_Fnode"] = NULL * np.ones((NNODES_MAX, 3))
outputs["platform_Fnode"][:nnode, :] = Fnode
def set_element_props(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Initialize running lists across all members
elem_D = np.array([])
elem_t = np.array([])
elem_A = np.array([])
elem_Asx = np.array([])
elem_Asy = np.array([])
elem_Ixx = np.array([])
elem_Iyy = np.array([])
elem_Izz = np.array([])
elem_rho = np.array([])
elem_E = np.array([])
elem_G = np.array([])
elem_sigy = np.array([])
elem_Px1 = np.array([])
elem_Px2 = np.array([])
elem_Py1 = np.array([])
elem_Py2 = np.array([])
elem_Pz1 = np.array([])
elem_Pz2 = np.array([])
elem_qdyn = np.array([])
elem_memid = np.array([], dtype=np.int_)
mass = 0.0
m_ball = 0.0
cost = 0.0
volume = 0.0
Awater = 0.0
Iwater = 0.0
m_added = np.zeros(6)
cg_plat = np.zeros(3)
cb_plat = np.zeros(3)
centroid = outputs["platform_centroid"][:2]
variable_capacity = np.zeros(n_member)
# Append all member data
for k in range(n_member):
n = np.where(inputs[f"member{k}:section_A"] == NULL)[0][0]
elem_D = np.append(elem_D, inputs[f"member{k}:section_D"][:n])
elem_t = np.append(elem_t, inputs[f"member{k}:section_t"][:n])
elem_A = np.append(elem_A, inputs[f"member{k}:section_A"][:n])
elem_Asx = np.append(elem_Asx, inputs[f"member{k}:section_Asx"][:n])
elem_Asy = np.append(elem_Asy, inputs[f"member{k}:section_Asy"][:n])
elem_Ixx = np.append(elem_Ixx, inputs[f"member{k}:section_Ixx"][:n])
elem_Iyy = np.append(elem_Iyy, inputs[f"member{k}:section_Iyy"][:n])
elem_Izz = np.append(elem_Izz, inputs[f"member{k}:section_Izz"][:n])
elem_rho = np.append(elem_rho, inputs[f"member{k}:section_rho"][:n])
elem_E = np.append(elem_E, inputs[f"member{k}:section_E"][:n])
elem_G = np.append(elem_G, inputs[f"member{k}:section_G"][:n])
elem_sigy = np.append(elem_sigy, inputs[f"member{k}:section_sigma_y"][:n])
elem_qdyn = np.append(elem_qdyn, inputs[f"member{k}:qdyn"][:n])
elem_memid = np.append(elem_memid, k * np.ones(n, dtype=np.int_))
# The loads should come in with length n+1
elem_Px1 = np.append(elem_Px1, inputs[f"member{k}:Px"][:n])
elem_Px2 = np.append(elem_Px2, inputs[f"member{k}:Px"][1 : (n + 1)])
elem_Py1 = np.append(elem_Py1, inputs[f"member{k}:Py"][:n])
elem_Py2 = np.append(elem_Py2, inputs[f"member{k}:Py"][1 : (n + 1)])
elem_Pz1 = np.append(elem_Pz1, inputs[f"member{k}:Pz"][:n])
elem_Pz2 = np.append(elem_Pz2, inputs[f"member{k}:Pz"][1 : (n + 1)])
# Mass, volume, cost tallies
imass = inputs[f"member{k}:total_mass"]
ivol = inputs[f"member{k}:displacement"]
mass += imass
volume += ivol
cost += inputs[f"member{k}:total_cost"]
m_ball += inputs[f"member{k}:ballast_mass"]
Awater_k = inputs[f"member{k}:Awater"]
Awater += Awater_k
Rwater2 = np.sum((inputs[f"member{k}:waterline_centroid"] - centroid) ** 2)
Iwater += inputs[f"member{k}:Iwater"] + Awater_k * Rwater2
m_added += inputs[f"member{k}:added_mass"]
variable_capacity[k] = inputs[f"member{k}:variable_ballast_capacity"]
# Center of mass / buoyancy tallies
cg_plat += imass * inputs[f"member{k}:center_of_mass"]
cb_plat += ivol * inputs[f"member{k}:center_of_buoyancy"]
# Add transition piece
m_trans = inputs["transition_piece_mass"]
cg_trans = inputs["transition_node"]
I_trans = util.assembleI(outputs["transition_piece_I"])
mass += m_trans
cost += inputs["transition_piece_cost"]
cg_plat += m_trans * cg_trans
# Finalize outputs
cg_plat /= mass
cb_plat /= volume
# With CG known, loop back through to compute platform I
unit_z = np.array([0.0, 0.0, 1.0])
I_hull = np.zeros((3, 3))
for k in range(n_member):
xyz_k = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz_k[:, 0] == NULL)[0][0]
xyz_k = xyz_k[:inodes, :]
imass = inputs[f"member{k}:total_mass"]
cg_k = inputs[f"member{k}:center_of_mass"]
R = cg_plat - cg_k
# Figure out angle to make member parallel to global c.s.
vec_k = xyz_k[-1, :] - xyz_k[0, :]
T = util.rotate_align_vectors(vec_k, unit_z)
# Rotate member inertia tensor
I_k = util.assembleI(inputs[f"member{k}:I_total"])
I_k_rot = T @ I_k @ T.T
# Now do parallel axis theorem
I_hull += np.array(I_k_rot) + imass * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Add in transition piece
R = cg_plat - cg_trans
I_hull += I_trans + m_trans * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Store outputs
nelem = elem_A.size
outputs["platform_elem_D"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_t"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_A"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Asx"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Asy"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Ixx"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Iyy"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Izz"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_rho"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_E"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_G"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_sigma_y"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Px1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Px2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Py1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Py2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Pz1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Pz2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_qdyn"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_D"][:nelem] = elem_D
outputs["platform_elem_t"][:nelem] = elem_t
outputs["platform_elem_A"][:nelem] = elem_A
outputs["platform_elem_Asx"][:nelem] = elem_Asx
outputs["platform_elem_Asy"][:nelem] = elem_Asy
outputs["platform_elem_Ixx"][:nelem] = elem_Ixx
outputs["platform_elem_Iyy"][:nelem] = elem_Iyy
outputs["platform_elem_Izz"][:nelem] = elem_Izz
outputs["platform_elem_rho"][:nelem] = elem_rho
outputs["platform_elem_E"][:nelem] = elem_E
outputs["platform_elem_G"][:nelem] = elem_G
outputs["platform_elem_sigma_y"][:nelem] = elem_sigy
outputs["platform_elem_Px1"][:nelem] = elem_Px1
outputs["platform_elem_Px2"][:nelem] = elem_Px2
outputs["platform_elem_Py1"][:nelem] = elem_Py1
outputs["platform_elem_Py2"][:nelem] = elem_Py2
outputs["platform_elem_Pz1"][:nelem] = elem_Pz1
outputs["platform_elem_Pz2"][:nelem] = elem_Pz2
outputs["platform_elem_qdyn"][:nelem] = elem_qdyn
discrete_outputs["platform_elem_memid"] = elem_memid
outputs["platform_mass"] = mass
outputs["platform_ballast_mass"] = m_ball
outputs["platform_hull_mass"] = mass - m_ball
outputs["platform_cost"] = cost
outputs["platform_displacement"] = volume
outputs["platform_hull_center_of_mass"] = cg_plat
outputs["platform_center_of_buoyancy"] = cb_plat
outputs["platform_I_hull"] = util.unassembleI(I_hull)
outputs["platform_Awater"] = Awater
outputs["platform_Iwater"] = Iwater
outputs["platform_added_mass"] = m_added
outputs["platform_variable_capacity"] = variable_capacity
class TowerPreMember(om.ExplicitComponent):
def setup(self):
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("tower_height", 0.0, units="m")
self.add_output("tower_top_node", np.zeros(3), units="m")
def compute(self, inputs, outputs):
transition_node = inputs["transition_node"]
tower_top_node = 0  # start from zero so the transition_node input array is not modified in place
tower_top_node += transition_node
tower_top_node[2] += float(inputs["tower_height"])
outputs["tower_top_node"] = tower_top_node
class PlatformTowerFrame(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
n_attach = opt["mooring"]["n_attach"]
self.add_input("platform_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_input("platform_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_input("platform_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_input("platform_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("platform_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("platform_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("platform_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("platform_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_input("platform_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_input("platform_mass", 0.0, units="kg")
self.add_input("platform_I_hull", np.zeros(6), units="kg*m**2")
self.add_input("platform_displacement", 0.0, units="m**3")
self.add_input("tower_nodes", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_output("tower_Fnode", copy_shape="tower_nodes", units="N")
self.add_input("tower_Rnode", NULL * np.ones(MEMMAX), units="m")
self.add_output("tower_elem_n1", copy_shape="tower_elem_A")
self.add_output("tower_elem_n2", copy_shape="tower_elem_A")
self.add_output("tower_elem_L", copy_shape="tower_elem_A", units="m")
self.add_input("tower_elem_D", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_t", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
self.add_input("tower_elem_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_Px", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Px1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Px2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Py", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Py1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Py2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Pz", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Pz1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Pz2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_qdyn", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_center_of_mass", np.zeros(3), units="m")
self.add_input("tower_mass", 0.0, units="kg")
self.add_input("rho_water", 0.0, units="kg/m**3")
self.add_input("tower_top_node", np.zeros(3), units="m")
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("rna_mass", 0.0, units="kg")
self.add_input("rna_cg", np.zeros(3), units="m")
self.add_input("mooring_neutral_load", np.zeros((n_attach, 3)), units="N")
self.add_input("platform_variable_capacity", np.zeros(n_member), units="m**3")
for k in range(n_member):
self.add_input(f"member{k}:nodes_xyz", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_input(f"member{k}:variable_ballast_Vpts", val=np.zeros(10), units="m**3")
self.add_input(f"member{k}:variable_ballast_spts", val=np.zeros(10))
self.add_output("system_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_output("system_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_output("system_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_output("system_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("system_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("system_elem_L", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("system_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("system_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("system_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("system_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("system_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("system_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("system_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("system_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("system_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_output("system_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_structural_center_of_mass", np.zeros(3), units="m")
self.add_output("system_structural_mass", 0.0, units="kg")
self.add_output("system_center_of_mass", np.zeros(3), units="m")
self.add_output("system_mass", 0.0, units="kg")
self.add_output("variable_ballast_mass", 0.0, units="kg")
self.add_output("variable_center_of_mass", val=np.zeros(3), units="m")
self.add_output("constr_variable_margin", val=0.0)
self.add_output("member_variable_volume", val=np.zeros(n_member), units="m**3")
self.add_output("member_variable_height", val=np.zeros(n_member))
self.add_output("platform_total_center_of_mass", np.zeros(3), units="m")
self.add_output("platform_I_total", np.zeros(6), units="kg*m**2")
def compute(self, inputs, outputs):
# Combine nodes
node_platform = inputs["platform_nodes"]
node_tower = inputs["tower_nodes"]
nnode_platform = np.where(node_platform[:, 0] == NULL)[0][0]
nnode_tower = np.where(node_tower[:, 0] == NULL)[0][0]
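# The tower base node is shared with the platform transition node, so the combined
# node count drops one duplicate (np.maximum(1, ...) presumably covers the no-tower case)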
nnode_system = nnode_platform + np.maximum(1, nnode_tower) - 1
nelem_platform = np.where(inputs["platform_elem_A"] == NULL)[0][0]
nelem_tower = np.where(inputs["tower_elem_A"] == NULL)[0][0]
nelem_system = nelem_platform + nelem_tower
# Combine element indices and have the tower base node point to the platform transition node
outputs["tower_Fnode"] = np.zeros(node_tower.shape)
outputs["tower_elem_n1"] = NULL * np.ones(MEMMAX, dtype=np.int_)
outputs["tower_elem_n2"] = NULL * np.ones(MEMMAX, dtype=np.int_)
outputs["tower_elem_L"] = NULL * np.ones(MEMMAX)
tower_n1 = np.arange(nelem_tower, dtype=np.int_)
tower_n2 = np.arange(nelem_tower, dtype=np.int_) + 1
outputs["tower_elem_n1"][:nelem_tower] = idx1 = tower_n1.copy()
outputs["tower_elem_n2"][:nelem_tower] = idx2 = tower_n2.copy()
itrans_platform = util.closest_node(node_platform[:nnode_platform, :], inputs["transition_node"])
tower_n1 += nnode_platform - 1
tower_n2 += nnode_platform - 1
tower_n1[0] = itrans_platform
outputs["tower_elem_L"][:nelem_tower] = np.sqrt(
np.sum((node_tower[idx2, :] - node_tower[idx1, :]) ** 2, axis=1)
)
# Store all outputs
outputs["system_nodes"] = NULL * np.ones((NNODES_MAX, 3))
outputs["system_Fnode"] = NULL * np.ones((NNODES_MAX, 3))
outputs["system_Rnode"] = NULL * np.ones(NNODES_MAX)
outputs["system_elem_n1"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["system_elem_n2"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["system_elem_L"] = NULL * np.ones(NELEM_MAX)
outputs["system_nodes"][:nnode_system, :] = sysnode = np.vstack(
(node_platform[:nnode_platform, :], node_tower[1:nnode_tower, :])
)
outputs["system_Fnode"][:nnode_system, :] = np.vstack(
(inputs["platform_Fnode"][:nnode_platform, :], outputs["tower_Fnode"][1:nnode_tower, :])
)
outputs["system_Rnode"][:nnode_system] = np.r_[
inputs["platform_Rnode"][:nnode_platform], inputs["tower_Rnode"][1:nnode_tower]
]
outputs["system_elem_n1"][:nelem_system] = idx1 = np.r_[
inputs["platform_elem_n1"][:nelem_platform],
tower_n1,
]
outputs["system_elem_n2"][:nelem_system] = idx2 = np.r_[
inputs["platform_elem_n2"][:nelem_platform],
tower_n2,
]
outputs["system_elem_L"][:nelem_system] = np.sqrt(
np.sum((sysnode[np.int_(idx2), :] - sysnode[np.int_(idx1), :]) ** 2, axis=1)
)
for var in [
"elem_D",
"elem_t",
"elem_A",
"elem_Asx",
"elem_Asy",
"elem_Ixx",
"elem_Iyy",
"elem_Izz",
"elem_rho",
"elem_E",
"elem_G",
"elem_sigma_y",
"elem_qdyn",
]:
outputs["system_" + var] = NULL * np.ones(NELEM_MAX)
outputs["system_" + var][:nelem_system] = np.r_[
inputs["platform_" + var][:nelem_platform], inputs["tower_" + var][:nelem_tower]
]
# Have to divide up tower member loads to beginning and end points
for var in ["elem_Px1", "elem_Py1", "elem_Pz1", "elem_Px2", "elem_Py2", "elem_Pz2"]:
outputs["system_" + var] = NULL * np.ones(NELEM_MAX)
outputs["tower_" + var] = NULL * np.ones(MEMMAX)
tower_P = inputs["tower_" + var[:-1]]
outputs["tower_" + var][:nelem_tower] = (
tower_P[:nelem_tower] if var[-1] == "1" else tower_P[1 : (nelem_tower + 1)]
)
outputs["system_" + var][:nelem_system] = np.r_[
inputs["platform_" + var][:nelem_platform], outputs["tower_" + var][:nelem_tower]
]
# Mass summaries
m_platform = inputs["platform_mass"]
cg_platform = inputs["platform_hull_center_of_mass"]
I_platform = util.assembleI(inputs["platform_I_hull"])
m_tower = inputs["tower_mass"]
m_rna = inputs["rna_mass"]
m_sys = m_platform + m_tower + m_rna
outputs["system_structural_mass"] = m_sys
outputs["system_structural_center_of_mass"] = (
m_platform * cg_platform
+ m_tower * inputs["tower_center_of_mass"]
+ m_rna * (inputs["rna_cg"] + inputs["tower_top_node"])
) / m_sys
# Balance out variable ballast
mooringFz = inputs["mooring_neutral_load"][:, 2].sum()
capacity = inputs["platform_variable_capacity"]
capacity_sum = capacity.sum() + EPS  # Avoid divide by zero
rho_water = inputs["rho_water"]
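# Vertical static equilibrium: rho_water * displacement * g + mooring Fz
# = (structural mass + variable ballast mass) * g, solved here for the ballast mass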
m_variable = inputs["platform_displacement"] * rho_water - m_sys + mooringFz / gravity
V_variable = m_variable / rho_water
outputs["variable_ballast_mass"] = m_variable
outputs["constr_variable_margin"] = V_variable / capacity_sum
V_variable_member = V_variable * capacity / capacity_sum
outputs["member_variable_volume"] = V_variable_member
m_variable_member = V_variable_member * rho_water
# Now find the CG of the variable mass assigned to each member
n_member = capacity.size
outputs["member_variable_height"] = np.zeros(n_member)
cg_variable_member = np.zeros((n_member, 3))
for k in range(n_member):
if V_variable_member[k] == 0.0:
continue
xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz[:, 0] == NULL)[0][0]
xyz = xyz[:inodes, :]
dxyz = xyz[-1, :] - xyz[0, :]
spts = inputs[f"member{k}:variable_ballast_spts"]
Vpts = inputs[f"member{k}:variable_ballast_Vpts"]
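# Approximate the ballast CG as the axial station where half of this member's ballast
# volume is reached, interpolated from the volume-vs-station (Vpts, spts) table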
s_cg = np.interp(0.5 * V_variable_member[k], Vpts, spts)
cg_variable_member[k, :] = xyz[0, :] + s_cg * dxyz
s_end = np.interp(V_variable_member[k], Vpts, spts)
outputs["member_variable_height"][k] = s_end - spts[0]
cg_variable = np.dot(V_variable_member, cg_variable_member) / V_variable
outputs["variable_center_of_mass"] = cg_variable
# Now find total system mass
outputs["system_mass"] = m_sys + m_variable
outputs["system_center_of_mass"] = (
m_sys * outputs["system_structural_center_of_mass"] + m_variable * cg_variable
) / (m_sys + m_variable)
# Compute the total cg for the platform and the variable ballast together using a weighted sum approach
cg_plat_total = (m_variable * cg_variable + m_platform * cg_platform) / (m_variable + m_platform)
outputs["platform_total_center_of_mass"] = cg_plat_total
# Now loop again to compute variable I
unit_z = np.array([0.0, 0.0, 1.0])
I_variable = np.zeros((3, 3))
for k in range(n_member):
if V_variable_member[k] == 0.0:
continue
xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz[:, 0] == NULL)[0][0]
xyz = xyz[:inodes, :]
vec_k = xyz[-1, :] - xyz[0, :]
ds = outputs["member_variable_height"][k]
# Compute I aligned with member
h_k = ds * np.sqrt(np.sum(vec_k ** 2))
if h_k == 0.0:
continue
r_k = np.sqrt(V_variable_member[k] / h_k / np.pi)
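# Solid-cylinder inertia about its own CG: Ixx = Iyy = m*(3*r^2 + h^2)/12,
# Izz = m*r^2/2, with zero products of inertia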
I_k = (
m_variable_member[k] * np.r_[(3 * r_k ** 2 + h_k ** 2) / 12.0 * np.ones(2), 0.5 * r_k ** 2, np.zeros(3)]
)
# Rotate I to global c.s.
T = util.rotate_align_vectors(vec_k, unit_z)
I_k_rot = T @ util.assembleI(I_k) @ T.T
# Now do parallel axis theorem
R = cg_variable - cg_variable_member[k, :]
I_variable += np.array(I_k_rot) + m_variable_member[k] * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Find platform I with variable contribution
I_total = np.zeros((3, 3))
# Compute the full moment of inertia for the platform and variable ballast
R = cg_plat_total - cg_platform
I_total += I_platform + m_platform * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
R = cg_plat_total - cg_variable
I_total += I_variable + m_variable * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
outputs["platform_I_total"] = util.unassembleI(I_total)
class FrameAnalysis(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_attach = opt["mooring"]["n_attach"]
self.add_input("platform_mass", 0.0, units="kg")
self.add_input("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_input("platform_added_mass", np.zeros(6), units="kg")
self.add_input("platform_center_of_buoyancy", np.zeros(3), units="m")
self.add_input("platform_displacement", 0.0, units="m**3")
self.add_input("tower_nodes", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_input("tower_Fnode", NULL * np.ones((MEMMAX, 3)), units="N")
self.add_input("tower_Rnode", NULL * | np.ones(MEMMAX) | numpy.ones |
"""
Testing code.
Updated BSM February 2017
"""
import sys
import os
import numpy as np
import pytest
from pytest import approx
from numpy.testing import assert_allclose
from scipy.spatial.distance import cdist
from pykrige import kriging_tools as kt
from pykrige import core
from pykrige import variogram_models
from pykrige.ok import OrdinaryKriging
from pykrige.uk import UniversalKriging
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk3d import UniversalKriging3D
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
allclose_pars = {"rtol": 1e-05, "atol": 1e-08}
@pytest.fixture
def validation_ref():
data = np.genfromtxt(os.path.join(BASE_DIR, "test_data/test_data.txt"))
ok_test_answer, ok_test_gridx, ok_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test1_answer.asc"), footer=2
)
uk_test_answer, uk_test_gridx, uk_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test2_answer.asc"), footer=2
)
return (
data,
(ok_test_answer, ok_test_gridx, ok_test_gridy),
(uk_test_answer, uk_test_gridx, uk_test_gridy),
)
@pytest.fixture
def sample_data_2d():
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
gridx = np.arange(0.0, 6.0, 1.0)
gridx_2 = np.arange(0.0, 5.5, 0.5)
gridy = np.arange(0.0, 5.5, 0.5)
xi, yi = np.meshgrid(gridx, gridy)
mask = np.array(xi == yi)
return data, (gridx, gridy, gridx_2), mask
@pytest.fixture
def sample_data_3d():
data = np.array(
[
[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7],
]
)
gridx = np.arange(0.0, 0.6, 0.05)
gridy = np.arange(0.0, 0.6, 0.01)
gridz = np.arange(0.0, 0.6, 0.1)
zi, yi, xi = np.meshgrid(gridz, gridy, gridx, indexing="ij")
mask = np.array((xi == yi) & (yi == zi))
return data, (gridx, gridy, gridz), mask
def test_core_adjust_for_anisotropy():
X = np.array([[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0.0, 0.0], [2.0], [90.0])
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 2.0, 0.0]), **allclose_pars)
def test_core_adjust_for_anisotropy_3d():
# this is a bad example, as the X matrix is symmetric
# and insensitive to transpositions
X = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]).T
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [90.0, 0.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([1.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 0.0, 2.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, -2.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 90.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 2.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([2.0, 0.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 0.0, 90.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, 0.0, 2.0]), **allclose_pars)
def test_core_make_variogram_parameter_list():
# test of first case - variogram_model_parameters is None
# function should return None unaffected
result = core._make_variogram_parameter_list("linear", None)
assert result is None
# tests for second case - variogram_model_parameters is dict
with pytest.raises(KeyError):
core._make_variogram_parameter_list("linear", {"tacos": 1.0, "burritos": 2.0})
result = core._make_variogram_parameter_list(
"linear", {"slope": 1.0, "nugget": 0.0}
)
assert result == [1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("power", {"frijoles": 1.0})
result = core._make_variogram_parameter_list(
"power", {"scale": 2.0, "exponent": 1.0, "nugget": 0.0}
)
assert result == [2.0, 1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("exponential", {"tacos": 1.0})
with pytest.raises(KeyError):
core._make_variogram_parameter_list(
"exponential", {"range": 1.0, "nugget": 1.0}
)
result = core._make_variogram_parameter_list(
"exponential", {"sill": 5.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list(
"exponential", {"psill": 4.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
with pytest.raises(TypeError):
core._make_variogram_parameter_list("custom", {"junk": 1.0})
with pytest.raises(ValueError):
core._make_variogram_parameter_list("blarg", {"junk": 1.0})
# tests for third case - variogram_model_parameters is list
with pytest.raises(ValueError):
core._make_variogram_parameter_list("linear", [1.0, 2.0, 3.0])
result = core._make_variogram_parameter_list("linear", [1.0, 2.0])
assert result == [1.0, 2.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("power", [1.0, 2.0])
result = core._make_variogram_parameter_list("power", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("exponential", [1.0, 2.0, 3.0, 4.0])
result = core._make_variogram_parameter_list("exponential", [5.0, 2.0, 1.0])
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list("custom", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("junk", [1.0, 1.0, 1.0])
# test for last case - make sure function handles incorrect
# variogram_model_parameters type appropriately
with pytest.raises(TypeError):
core._make_variogram_parameter_list("linear", "tacos")
def test_core_initialize_variogram_model(validation_ref):
data, _, _ = validation_ref
# Note the variogram_function argument is not a string in real life...
# core._initialize_variogram_model also checks the length of input
# lists, which is redundant now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
# core._initialize_variogram_model does also check coordinate type,
# this is NOT redundant
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0, 0.0, 0.0],
"spherical",
6,
False,
"tacos",
)
x = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
y = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
z = np.arange(1.0, 5.0, 1.0)
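# Consecutive points are exactly 1 apart along the 45-degree line, so the expected
# lags are 1, 2, 3 and the semivariances 0.5 * d**2 = 0.5, 2.0, 4.5 (asserted below)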
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack((x, y)).T, z, "linear", [0.0, 0.0], "linear", 6, False, "euclidean"
)
assert_allclose(lags, np.array([1.0, 2.0, 3.0]))
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
def test_core_initialize_variogram_model_3d(sample_data_3d):
data, _, _ = sample_data_3d
# Note the variogram_function argument is not a string in real life...
# again, these checks in core._initialize_variogram_model are redundant
# now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0, 0.0],
"linear",
6,
False,
"geographic",
)
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack(
(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
)
).T,
np.array([1.0, 2.0, 3.0, 4.0]),
"linear",
[0.0, 0.0],
"linear",
3,
False,
"euclidean",
)
assert_allclose(
lags, np.array([np.sqrt(3.0), 2.0 * np.sqrt(3.0), 3.0 * np.sqrt(3.0)])
)
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
def test_core_calculate_variogram_model():
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
False,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
True,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.8284271, 5.1961524, 8.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 1.5, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 1.4142, 1.7321, 2.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 0.5, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.2642, 1.7293, 1.9004, 1.9634]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([0.5769, 1.4872, 1.9065, 1.9914]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([3.33060952, 3.85063879, 3.96667301, 3.99256374]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.60487044, 3.85968813, 3.99694817, 3.99998564]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
def test_core_krige():
# Example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.22], [43.8, 24.6, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([18.8, 67.9]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
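# Predicting at an existing data location should reproduce the datum with ~zero variance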
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([43.8, 24.6]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_core_krige_3d():
# Adapted from example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.0, 1.22], [43.8, 24.6, 1.0, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([18.8, 67.9, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([43.8, 24.6, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_non_exact():
# custom data for this test
data = np.array(
[
[0.0, 0.0, 0.47],
[1.5, 1.5, 0.56],
[3, 3, 0.74],
[4.5, 4.5, 1.47],
]
)
# construct grid points so diagonal
# is identical to input points
gridx = np.arange(0.0, 4.51, 1.5)
gridy = np.arange(0.0, 4.51, 1.5)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
ok_non_exact = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
exact_values=False,
)
z_non_exact, ss_non_exact = ok_non_exact.execute(
"grid", gridx, gridy, backend="vectorized"
)
in_values = np.diag(z)
# test that the kriged field at the input locations is identical
# to the inputs themselves with exact_values == True
assert_allclose(in_values, data[:, 2])
# test that the kriged field at the input locations differs
# from the inputs themselves with exact_values == False
assert not np.allclose(np.diag(z_non_exact), data[:, 2])
# test that off diagonal values are the same
# by filling with dummy value and comparing
# each entry in array
np.fill_diagonal(z, 0.0)
np.fill_diagonal(z_non_exact, 0.0)
assert_allclose(z, z_non_exact)
def test_ok(validation_ref):
# Test to compare OK results to those obtained using KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
data, (ok_test_answer, gridx, gridy), _ = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, ok_test_answer)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, ok_test_answer)
def test_ok_update_variogram_model(validation_ref):
data, (ok_test_answer, gridx, gridy), _ = validation_ref
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="blurg")
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
variogram_model = ok.variogram_model
variogram_parameters = ok.variogram_model_parameters
anisotropy_scaling = ok.anisotropy_scaling
anisotropy_angle = ok.anisotropy_angle
with pytest.raises(ValueError):
ok.update_variogram_model("blurg")
ok.update_variogram_model("power", anisotropy_scaling=3.0, anisotropy_angle=45.0)
# TODO: check that the new parameters are equal to the set parameters
assert variogram_model != ok.variogram_model
assert not np.array_equal(variogram_parameters, ok.variogram_model_parameters)
assert anisotropy_scaling != ok.anisotropy_scaling
assert anisotropy_angle != ok.anisotropy_angle
def test_ok_get_variogram_points(validation_ref):
# Test to compare the variogram of OK results to those obtained using
# KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
# Variogram parameters
_variogram_parameters = [500.0, 3000.0, 0.0]
data, _, (ok_test_answer, gridx, gridy) = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=_variogram_parameters,
)
# Get the variogram points from the OrdinaryKriging instance
lags, calculated_variogram = ok.get_variogram_points()
# Generate the expected variogram points according to the
# exponential variogram model
expected_variogram = variogram_models.exponential_variogram_model(
_variogram_parameters, lags
)
assert_allclose(calculated_variogram, expected_variogram)
def test_ok_execute(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], exact_values="blurg")
ok_non_exact = OrdinaryKriging(
data[:, 0], data[:, 1], data[:, 2], exact_values=False
)
with pytest.raises(ValueError):
ok.execute("blurg", gridx, gridy)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z1, ss1 = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z1, z)
assert_allclose(ss1, ss)
z, ss = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="vectorized")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref.T, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="loop")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref.T, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok_non_exact.execute(
"masked", gridx, gridy, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(ValueError):
ok.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
backend="vectorized",
)
z, ss = ok.execute("points", gridx[0], gridy[0], backend="vectorized")
assert z.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
ok.execute(
"points", np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]), backend="loop"
)
z, ss = ok.execute("points", gridx[0], gridy[0], backend="loop")
assert z.shape == (1,)
assert ss.shape == (1,)
def test_cython_ok(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
ok_non_exact = OrdinaryKriging(
data[:, 0], data[:, 1], data[:, 2], exact_values=False
)
z1, ss1 = ok.execute("grid", gridx, gridy, backend="loop")
z2, ss2 = ok.execute("grid", gridx, gridy, backend="C")
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
z1, ss1 = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
z2, ss2 = ok_non_exact.execute("grid", gridx, gridy, backend="C")
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
closest_points = 4
z1, ss1 = ok.execute(
"grid", gridx, gridy, backend="loop", n_closest_points=closest_points
)
z2, ss2 = ok.execute(
"grid", gridx, gridy, backend="C", n_closest_points=closest_points
)
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
z1, ss1 = ok_non_exact.execute(
"grid", gridx, gridy, backend="loop", n_closest_points=closest_points
)
z2, ss2 = ok_non_exact.execute(
"grid", gridx, gridy, backend="C", n_closest_points=closest_points
)
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
def test_uk(validation_ref):
# Test to compare UK with linear drift to results from KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
data, _, (uk_test_answer, gridx, gridy) = validation_ref
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
drift_terms=["regional_linear"],
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, uk_test_answer)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, uk_test_answer)
def test_uk_update_variogram_model(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="blurg")
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], drift_terms=["external_Z"])
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
drift_terms=["external_Z"],
external_drift=np.array([0]),
)
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], drift_terms=["point_log"])
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
variogram_model = uk.variogram_model
variogram_parameters = uk.variogram_model_parameters
anisotropy_scaling = uk.anisotropy_scaling
anisotropy_angle = uk.anisotropy_angle
with pytest.raises(ValueError):
uk.update_variogram_model("blurg")
uk.update_variogram_model("power", anisotropy_scaling=3.0, anisotropy_angle=45.0)
# TODO: check that the new parameters are equal to the expected ones
assert variogram_model != uk.variogram_model
assert not np.array_equal(variogram_parameters, uk.variogram_model_parameters)
assert anisotropy_scaling != uk.anisotropy_scaling
assert anisotropy_angle != uk.anisotropy_angle
def test_uk_get_variogram_points(validation_ref):
# Test to compare the variogram of UK with linear drift to results from
# KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
# Variogram parameters
_variogram_parameters = [500.0, 3000.0, 0.0]
data, _, (uk_test_answer, gridx, gridy) = validation_ref
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=_variogram_parameters,
drift_terms=["regional_linear"],
)
# Get the variogram points from the UniversalKriging instance
lags, calculated_variogram = uk.get_variogram_points()
# Generate the expected variogram points according to the
# exponential variogram model
expected_variogram = variogram_models.exponential_variogram_model(
_variogram_parameters, lags
)
assert_allclose(calculated_variogram, expected_variogram)
def test_uk_calculate_data_point_zscalars(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=np.arange(0.0, 5.0, 1.0),
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=dem_y,
)
assert_allclose(uk.z_scalars, data[:, 0])
xi, yi = np.meshgrid(np.arange(0.0, 5.3, 0.1), gridy)
with pytest.raises(ValueError):
uk._calculate_data_point_zscalars(xi, yi)
xi, yi = np.meshgrid(np.arange(0.0, 5.0, 0.1), gridy)
z_scalars = uk._calculate_data_point_zscalars(xi, yi)
assert_allclose(z_scalars[0, :], np.arange(0.0, 5.0, 0.1))
def test_uk_execute_single_point():
# Test data and answer from lecture notes by <NAME>, UCLA Stats
data = np.array(
[
[61.0, 139.0, 477.0],
[63.0, 140.0, 696.0],
[64.0, 129.0, 227.0],
[68.0, 128.0, 646.0],
[71.0, 140.0, 606.0],
[73.0, 141.0, 791.0],
[75.0, 128.0, 783.0],
]
)
point = (65.0, 137.0)
z_answer = 567.54
ss_answer = 9.044
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[10.0, 9.99, 0.0],
drift_terms=["regional_linear"],
)
z, ss = uk.execute(
"points", np.array([point[0]]), np.array([point[1]]), backend="vectorized"
)
assert z_answer == approx(z[0], rel=0.1)
assert ss_answer == approx(ss[0], rel=0.1)
z, ss = uk.execute(
"points", np.array([61.0]), np.array([139.0]), backend="vectorized"
)
assert z[0] == approx(477.0, rel=1e-3)
assert ss[0] == approx(0.0, rel=1e-3)
z, ss = uk.execute("points", np.array([61.0]), np.array([139.0]), backend="loop")
assert z[0] == approx(477.0, rel=1e-3)
assert ss[0] == approx(0.0, rel=1e-3)
def test_uk_execute(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
exact_values="blurg",
)
uk_non_exact = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
with pytest.raises(ValueError):
uk.execute("blurg", gridx, gridy)
with pytest.raises(ValueError):
uk.execute("grid", gridx, gridy, backend="mrow")
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z1, ss1 = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z1, z)
assert_allclose(ss1, ss)
z, ss = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
with pytest.raises(IOError):
uk.execute("masked", gridx, gridy, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk.execute("masked", gridx, gridy, mask=mask, backend="vectorized")
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref.T, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(IOError):
uk.execute("masked", gridx, gridy, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk.execute("masked", gridx, gridy, mask=mask, backend="loop")
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref.T, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk_non_exact.execute(
"masked", gridx, gridy, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(ValueError):
uk.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
backend="vectorized",
)
z, ss = uk.execute("points", gridx[0], gridy[0], backend="vectorized")
assert z.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
uk.execute(
"points", np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]), backend="loop"
)
z, ss = uk.execute("points", gridx[0], gridy[0], backend="loop")
assert z.shape == (1,)
assert ss.shape == (1,)
def test_ok_uk_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_ok, ss_ok = ok.execute("grid", gridx, gridy, backend="vectorized")
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
uk_non_exact = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
exact_values=False,
)
z_uk, ss_uk = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_uk, ss_uk = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_ok, ss_ok = ok.execute("grid", gridx, gridy, backend="loop")
z_uk, ss_uk = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_uk, ss_uk = uk_non_exact.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
def test_ok_backends_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_ok_v, ss_ok_v = ok.execute("grid", gridx, gridy, backend="vectorized")
z_ok_l, ss_ok_l = ok.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok_v, z_ok_l)
assert_allclose(ss_ok_v, ss_ok_l)
def test_uk_backends_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_uk_v, ss_uk_v = uk.execute("grid", gridx, gridy, backend="vectorized")
z_uk_l, ss_uk_l = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_uk_v, z_uk_l)
assert_allclose(ss_uk_v, ss_uk_l)
def test_kriging_tools(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
z_write, ss_write = ok.execute("grid", gridx, gridy)
kt.write_asc_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=1,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
z_write, ss_write = ok.execute("masked", gridx, gridy, mask=mask_ref)
kt.write_asc_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=1,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert np.ma.allclose(
z_write,
np.ma.masked_where(z_read == no_data, z_read),
masked_equal=True,
rtol=0.01,
atol=0.01,
)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
z_write, ss_write = ok.execute("grid", gridx_2, gridy)
kt.write_asc_grid(
gridx_2,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=2,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx_2, x_read)
assert_allclose(gridy, y_read)
os.remove(os.path.join(BASE_DIR, "test_data/temp.asc"))
# http://doc.pytest.org/en/latest/skipping.html#id1
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_uk_three_primary_drifts(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
well = np.array([[1.1, 1.1, -1.0]])
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "external_Z", "point_log"],
point_drift=well,
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=dem_y,
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert z.shape == (gridy.shape[0], gridx.shape[0])
assert ss.shape == (gridy.shape[0], gridx.shape[0])
assert np.all(np.isfinite(z))
assert not np.all(np.isnan(z))
assert np.all(np.isfinite(ss))
assert not np.all(np.isnan(ss))
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert z.shape == (gridy.shape[0], gridx.shape[0])
assert ss.shape == (gridy.shape[0], gridx.shape[0])
assert np.all(np.isfinite(z))
assert not np.all(np.isnan(z))
assert np.all(np.isfinite(ss))
assert not np.all(np.isnan(ss))
def test_uk_specified_drift(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
xg, yg = np.meshgrid(gridx, gridy)
well = np.array([[1.1, 1.1, -1.0]])
point_log = (
well[0, 2]
* np.log(np.sqrt((xg - well[0, 0]) ** 2.0 + (yg - well[0, 1]) ** 2.0))
* -1.0
)
if np.any(np.isinf(point_log)):
point_log[np.isinf(point_log)] = -100.0 * well[0, 2] * -1.0
point_log_data = (
well[0, 2]
* np.log(
np.sqrt((data[:, 0] - well[0, 0]) ** 2.0 + (data[:, 1] - well[0, 1]) ** 2.0)
)
* -1.0
)
if np.any(np.isinf(point_log_data)):
point_log_data[np.isinf(point_log_data)] = -100.0 * well[0, 2] * -1.0
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
)
with pytest.raises(TypeError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=data[:, 0],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:2, 0]],
)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1]],
)
with pytest.raises(ValueError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=[gridx, gridy])
with pytest.raises(TypeError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=gridx)
with pytest.raises(ValueError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=[xg])
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[xg, yg]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[point_log_data],
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[point_log]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1], point_log_data],
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[xg, yg, point_log]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
def test_uk_functional_drift(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
well = np.array([[1.1, 1.1, -1.0]])
func_x = lambda x, y: x # noqa
func_y = lambda x, y: y # noqa
def func_well(x, y):
return -well[0, 2] * np.log(
np.sqrt((x - well[0, 0]) ** 2.0 + (y - well[0, 1]) ** 2.0)
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
)
with pytest.raises(TypeError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=func_x,
)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_well],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y, func_well],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
def test_uk_with_external_drift(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
dem, demx, demy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test3_dem.asc")
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="spherical",
variogram_parameters=[500.0, 3000.0, 0.0],
anisotropy_scaling=1.0,
anisotropy_angle=0.0,
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=demx,
external_drift_y=demy,
verbose=False,
)
answer, gridx, gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test3_answer.asc")
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, answer, **allclose_pars)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, answer, **allclose_pars)
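# Kriging is an exact interpolator: at the sample locations the prediction
# equals the observed value and the kriging variance is zero, while points away
# from the data keep a nonzero variance. The blocks below exercise this for
# OrdinaryKriging and UniversalKriging with both the vectorized and loop backends.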
def test_force_exact():
data = np.array([[1.0, 1.0, 2.0], [2.0, 2.0, 1.5], [3.0, 3.0, 1.0]])
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 1.0],
)
z, ss = ok.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="vectorized")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = ok.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="vectorized"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="vectorized"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert not np.any(np.isclose(ss, 0))
z, ss = ok.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="vectorized",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = ok.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="loop")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = ok.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="loop"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="loop"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert not np.any(np.isclose(ss, 0))
z, ss = ok.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="loop",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
z, ss = uk.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="vectorized")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = uk.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="vectorized"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="vectorized"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert not np.any(np.isclose(ss, 0))
z, ss = uk.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="vectorized",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = uk.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="loop")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = uk.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="loop"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="loop"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert not np.any(np.isclose(ss, 0))
z, ss = uk.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="loop",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([1.0, 1.0]),
variogram_models.linear_variogram_model,
[1.0, 1.0],
"euclidean",
)
assert z == approx(2.0)
assert ss == approx(0.0)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([1.0, 2.0]),
variogram_models.linear_variogram_model,
[1.0, 1.0],
"euclidean",
)
assert ss != approx(0.0)
data = np.zeros((50, 3))
x, y = np.meshgrid(np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(x) * np.ravel(y)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[100.0, 1.0],
)
z, ss = ok.execute(
"grid",
np.arange(0.0, 10.0, 1.0),
np.arange(0.0, 10.0, 2.0),
backend="vectorized",
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = ok.execute(
"grid",
np.arange(0.5, 10.0, 1.0),
np.arange(0.5, 10.0, 2.0),
backend="vectorized",
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0), backend="loop"
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = ok.execute(
"grid", np.arange(0.5, 10.0, 1.0), np.arange(0.5, 10.0, 2.0), backend="loop"
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[100.0, 1.0],
)
z, ss = uk.execute(
"grid",
np.arange(0.0, 10.0, 1.0),
np.arange(0.0, 10.0, 2.0),
backend="vectorized",
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = uk.execute(
"grid",
np.arange(0.5, 10.0, 1.0),
np.arange(0.5, 10.0, 2.0),
backend="vectorized",
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0), backend="loop"
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = uk.execute(
"grid", np.arange(0.5, 10.0, 1.0), np.arange(0.5, 10.0, 2.0), backend="loop"
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
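# A 'custom' variogram model requires both variogram_parameters and a callable
# variogram_function(params, dist); passing an unknown model name or omitting
# either piece should raise ValueError.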
def test_custom_variogram(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
def func(params, dist):
return params[0] * np.log10(dist + params[1]) + params[2]
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="mrow")
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="custom")
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=0,
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=func,
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_parameters=[1.0, 1.0, 1.0],
variogram_function=func,
)
assert uk.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="linear")
uk.update_variogram_model(
"custom", variogram_parameters=[1.0, 1.0, 1.0], variogram_function=func
)
assert uk.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="mrow")
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="custom")
with pytest.raises(ValueError):
OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=0,
)
with pytest.raises(ValueError):
OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=func,
)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_parameters=[1.0, 1.0, 1.0],
variogram_function=func,
)
assert ok.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="linear")
ok.update_variogram_model(
"custom", variogram_parameters=[1.0, 1.0, 1.0], variogram_function=func
)
assert ok.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
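# 3D ordinary kriging on data with all z-coordinates set to zero should match
# the 2D reference answer; a genuine 3D dataset is then compared against
# results produced by KT3D.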
def test_ok3d(validation_ref):
data, (ok_test_answer, gridx_ref, gridy_ref), _ = validation_ref
# Test to compare K3D results to those obtained using KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater, vol. 47,
# no. 4, 580-586.)
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
with pytest.raises(ValueError):
OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
exact_values="blurg",
)
ok3d_non_exact = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
exact_values=False,
)
k, ss = k3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="vectorized"
)
assert_allclose(np.squeeze(k), ok_test_answer)
k, ss = k3d.execute("grid", gridx_ref, gridy_ref, np.array([0.0]), backend="loop")
assert_allclose(np.squeeze(k), ok_test_answer)
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt(
os.path.join(BASE_DIR, "test_data", "test3d_data.txt"), skip_header=1
)
ans = np.genfromtxt(os.path.join(BASE_DIR, "test_data", "test3d_answer.txt"))
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
k, ss = k3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
assert_allclose(k, ans_z, rtol=1e-3, atol=1e-8)
assert_allclose(ss, ans_ss, rtol=1e-3, atol=1e-8)
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
k, ss = k3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
assert_allclose(k, ans_z, rtol=1e-3, atol=1e-8)
assert_allclose(ss, ans_ss, rtol=1e-3, atol=1e-8)
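# Moving-window 3D kriging (n_closest_points=10, loop backend) should remain
# within rtol=1e-3 of the full-data KT3D reference.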
def test_ok3d_moving_window():
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt(
os.path.join(BASE_DIR, "test_data", "test3d_data.txt"), skip_header=1
)
ans = np.genfromtxt(os.path.join(BASE_DIR, "test_data", "test3d_answer.txt"))
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
k, ss = k3d.execute(
"grid",
np.arange(10.0),
np.arange(10.0),
np.arange(10.0),
backend="loop",
n_closest_points=10,
)
assert_allclose(k, ans_z, rtol=1e-3)
assert_allclose(ss, ans_ss, rtol=1e-3)
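# With no drift terms, UniversalKriging3D should reduce to OrdinaryKriging3D,
# and the vectorized and loop backends must agree with each other.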
def test_ok3d_uk3d_and_backends_produce_same_results(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
ok3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
ok_v, oss_v = ok3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="vectorized"
)
ok_l, oss_l = ok3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="loop"
)
uk3d = UniversalKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
uk_v, uss_v = uk3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="vectorized"
)
assert_allclose(uk_v, ok_v)
uk_l, uss_l = uk3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="loop"
)
assert_allclose(uk_l, ok_l)
assert_allclose(uk_l, uk_v)
assert_allclose(uss_l, uss_v)
data = np.genfromtxt(
os.path.join(BASE_DIR, "test_data", "test3d_data.txt"), skip_header=1
)
ok3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
ok_v, oss_v = ok3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
ok_l, oss_l = ok3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
uk3d = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
uk_v, uss_v = uk3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
assert_allclose(uk_v, ok_v)
assert_allclose(uss_v, oss_v)
uk_l, uss_l = uk3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
assert_allclose(uk_l, ok_l)
assert_allclose(uss_l, oss_l)
assert_allclose(uk_l, uk_v)
assert_allclose(uss_l, uss_v)
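# update_variogram_model should swap the model, refit its parameters and apply
# the new anisotropy settings; an unknown model name raises ValueError.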
def test_ok3d_update_variogram_model(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
with pytest.raises(ValueError):
OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="blurg"
)
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
variogram_model = k3d.variogram_model
variogram_parameters = k3d.variogram_model_parameters
anisotropy_scaling_y = k3d.anisotropy_scaling_y
anisotropy_scaling_z = k3d.anisotropy_scaling_z
anisotropy_angle_x = k3d.anisotropy_angle_x
anisotropy_angle_y = k3d.anisotropy_angle_y
anisotropy_angle_z = k3d.anisotropy_angle_z
with pytest.raises(ValueError):
k3d.update_variogram_model("blurg")
k3d.update_variogram_model(
"power",
anisotropy_scaling_y=3.0,
anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0,
anisotropy_angle_y=45.0,
anisotropy_angle_z=45.0,
)
assert variogram_model != k3d.variogram_model
assert not np.array_equal(variogram_parameters, k3d.variogram_model_parameters)
assert anisotropy_scaling_y != k3d.anisotropy_scaling_y
assert anisotropy_scaling_z != k3d.anisotropy_scaling_z
assert anisotropy_angle_x != k3d.anisotropy_angle_x
assert anisotropy_angle_y != k3d.anisotropy_angle_y
assert anisotropy_angle_z != k3d.anisotropy_angle_z
def test_uk3d_update_variogram_model(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="blurg"
)
uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
variogram_model = uk3d.variogram_model
variogram_parameters = uk3d.variogram_model_parameters
anisotropy_scaling_y = uk3d.anisotropy_scaling_y
anisotropy_scaling_z = uk3d.anisotropy_scaling_z
anisotropy_angle_x = uk3d.anisotropy_angle_x
anisotropy_angle_y = uk3d.anisotropy_angle_y
anisotropy_angle_z = uk3d.anisotropy_angle_z
with pytest.raises(ValueError):
uk3d.update_variogram_model("blurg")
uk3d.update_variogram_model(
"power",
anisotropy_scaling_y=3.0,
anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0,
anisotropy_angle_y=45.0,
anisotropy_angle_z=45.0,
)
assert variogram_model != uk3d.variogram_model
assert not np.array_equal(variogram_parameters, uk3d.variogram_model_parameters)
assert anisotropy_scaling_y != uk3d.anisotropy_scaling_y
assert anisotropy_scaling_z != uk3d.anisotropy_scaling_z
assert anisotropy_angle_x != uk3d.anisotropy_angle_x
assert anisotropy_angle_y != uk3d.anisotropy_angle_y
assert anisotropy_angle_z != uk3d.anisotropy_angle_z
def test_ok3d_backends_produce_same_result(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
ok3d_non_exact = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
exact_values=False,
)
k_k3d_v, ss_k3d_v = k3d.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, backend="vectorized"
)
k_k3d_l, ss_k3d_l = k3d.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, backend="loop"
)
assert_allclose(k_k3d_v, k_k3d_l, rtol=1e-05, atol=1e-8)
assert_allclose(ss_k3d_v, ss_k3d_l, rtol=1e-05, atol=1e-8)
k, ss = ok3d_non_exact.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
k1, ss1 = ok3d_non_exact.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
assert_allclose(k1, k)
assert_allclose(ss1, ss)
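# Exercises the 'grid', 'masked' and 'points' output styles of
# OrdinaryKriging3D, including mask-shape validation and near-exact recovery of
# a synthetic field whose value equals the z coordinate.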
def test_ok3d_execute(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
with pytest.raises(ValueError):
k3d.execute("blurg", gridx_ref, gridy_ref, gridz_ref)
k, ss = k3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
k, ss = k3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="loop")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
with pytest.raises(IOError):
k3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="vectorized"
)
k, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="vectorized"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="vectorized"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(IOError):
k3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="loop"
)
k, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="loop"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(ValueError):
k3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="vectorized",
)
k, ss = k3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="vectorized"
)
assert k.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
k3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="loop",
)
k, ss = k3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="loop"
)
assert k.shape == (1,)
assert ss.shape == (1,)
data = np.zeros((125, 4))
z, y, x = np.meshgrid(
np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0)
)
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="vectorized",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="loop",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"points",
[2.5, 2.5, 2.5],
[2.5, 2.5, 2.5],
[1.0, 2.0, 3.0],
backend="vectorized",
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"points", [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1.0, 2.0, 3.0], backend="loop"
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
def test_uk3d_execute(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
with pytest.raises(ValueError):
uk3d.execute("blurg", gridx_ref, gridy_ref, gridz_ref)
k, ss = uk3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
k, ss = uk3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="loop")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
with pytest.raises(IOError):
uk3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="vectorized"
)
k, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="vectorized"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="vectorized"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(IOError):
uk3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="loop"
)
k, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="loop"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(ValueError):
uk3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="vectorized",
)
k, ss = uk3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="vectorized"
)
assert k.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
uk3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="loop",
)
k, ss = uk3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="loop"
)
assert k.shape == (1,)
assert ss.shape == (1,)
data = np.zeros((125, 4))
z, y, x = np.meshgrid(
np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0)
)
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
k3d = UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="vectorized",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="loop",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k3d = UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"points",
[2.5, 2.5, 2.5],
[2.5, 2.5, 2.5],
[1.0, 2.0, 3.0],
backend="vectorized",
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"points", [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1.0, 2.0, 3.0], backend="loop"
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
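# 3D analogue of test_force_exact: the kriging variance vanishes exactly at the
# sample locations and stays nonzero elsewhere.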
def test_force_exact_3d(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="vectorized"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="loop"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
k3d = UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="vectorized"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="loop"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
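# Specified drift arrays equal to the x/y/z coordinates must reproduce the
# built-in 'regional_linear' drift in 3D; malformed drift inputs raise
# ValueError or TypeError.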
def test_uk3d_specified_drift(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
zg, yg, xg = np.meshgrid(gridz_ref, gridy_ref, gridx_ref, indexing="ij")
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
)
with pytest.raises(TypeError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=data[:, 0],
)
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:2, 0]],
)
uk_spec = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1], data[:, 2]],
)
with pytest.raises(ValueError):
uk_spec.execute(
"grid",
gridx_ref,
gridy_ref,
gridz_ref,
specified_drift_arrays=[gridx_ref, gridy_ref, gridz_ref],
)
with pytest.raises(TypeError):
uk_spec.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, specified_drift_arrays=gridx_ref
)
with pytest.raises(ValueError):
uk_spec.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, specified_drift_arrays=[zg]
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, specified_drift_arrays=[xg, yg, zg]
)
uk_lin = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx_ref, gridy_ref, gridz_ref)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
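# Same check with callable drifts f(x, y, z): they should match the built-in
# 'regional_linear' drift term.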
def test_uk3d_functional_drift(sample_data_3d):
data, (gridx, gridy, gridz), mask_ref = sample_data_3d
func_x = lambda x, y, z: x # noqa
func_y = lambda x, y, z: y # noqa
func_z = lambda x, y, z: z # noqa
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["functional"],
)
with pytest.raises(TypeError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=func_x,
)
uk_func = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y, func_z],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy, gridz)
uk_lin = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy, gridz)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
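# Great-circle distances (returned in degrees) from core.great_circle_distance
# are validated against a geopy-derived reference matrix and against basic
# metric properties: zero diagonal, symmetry, non-negativity, and an upper
# bound of 180 degrees.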
def test_geometric_code():
# Create selected points distributed across the sphere:
N = 4
lon = np.array([7.0, 7.0, 187.0, 73.231])
lat = np.array([13.23, 13.2301, -13.23, -79.3])
# For the points generated with this reference seed, the distance matrix
# has been calculated using geopy (v. 1.11.0) as follows:
# >>> from geopy.distance import great_circle
# >>> g = great_circle(radius=1.0)
# >>> d = np.zeros((N,N), dtype=float)
# >>> for i in range(N):
# >>> for j in range(N):
# >>> d[i,j] = g.measure((lat[i],lon[i]),(lat[j],lon[j]))
# >>> d *= 180.0/np.pi
# From that distance matrix, the reference values have been obtained.
d_ref = np.array(
[
[0.0, 1e-4, 180.0, 98.744848317171801],
[1e-4, 0.0, 179.9999, 98.744946828324345],
[180.0, 179.9999, 0.0, 81.255151682828213],
[98.744848317171801, 98.744946828324345, 81.255151682828213, 0.0],
]
)
# Calculate distance matrix using the PyKrige code:
d = np.zeros((N, N))
for i in range(N):
for j in range(N):
d[i, j] = core.great_circle_distance(lon[i], lat[i], lon[j], lat[j])
# Test against reference values:
assert_allclose(d, d_ref)
# Test general features:
assert_allclose(d[np.eye(N, dtype=bool)], 0.0)
np.testing.assert_equal(d >= 0.0, np.ones((N, N), dtype=bool))
assert_allclose(d, d.T)
np.testing.assert_equal(d <= 180.0, np.ones((N, N), dtype=bool))
# Test great_circle_distance and euclid3_to_great_circle against each other
lon_ref = lon
lat_ref = lat
for i in range(len(lon_ref)):
lon, lat = np.meshgrid( | np.linspace(0, 360.0, 20) | numpy.linspace |
import numpy as np
from automon.cb.cb_common_node import CbCommonNode
# Implementation according to https://dl.acm.org/doi/pdf/10.1145/3226113
class CbInnerProductNode(CbCommonNode):
def __init__(self, idx, func_to_monitor=None, d=2, domain=None):
# func_to_monitor must be func_inner_product; however we keep function implementations outside of automon core.
CbCommonNode.__init__(self, idx, d=d, domain=domain, func_to_monitor=func_to_monitor)
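# _func_h is presumably the convex component used by the CB bound for the
# inner product: for a concatenated input X = [x, y] it returns ||x + y||**2,
# which relates to the monitored value through x.y = (||x+y||**2 - ||x-y||**2) / 4.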
def _func_h(self, X, threshold):
if len(X.shape) < 2:
x = X[:X.shape[0]//2]
y = X[X.shape[0]//2:]
res = np.linalg.norm(x + y)**2
else:
x = X[:, :X.shape[1]//2]
y = X[:, X.shape[1]//2:]
res = | np.linalg.norm(x + y, axis=1) | numpy.linalg.norm |
import os
import numpy as np
import hyperspy.api as hs
from pyiron_base._tests import TestWithCleanProject
import pyiron_experimental
class TestHSLineProfiles(TestWithCleanProject):
@classmethod
def setUpClass(cls):
super().setUpClass()
data = hs.load(os.path.join(cls.project.path, '../../notebooks/experiment.emd'))
cls.signal = data[0]
def setUp(self):
self.job = self.project.create.job.HSLineProfiles('tem')
def test_set_signal(self):
signal = self.signal
with self.subTest('No hs signal'):
with self.assertRaises(ValueError):
self.job.signal = None
with self.subTest('intended use case'):
self.job.signal = signal
self.assertEqual(self.job.input.signal.hs_class_name, 'Signal2D')
self.assertEqual(self.job.input.signal.axes, list(signal.axes_manager.as_dictionary().values()))
self.assertTrue(np.array_equal(self.job.input.signal.data, signal.data))
self.assertDictEqual(self.job.input.signal.metadata, signal.metadata.as_dictionary())
self.assertDictEqual(self.job.input.signal.original_metadata, signal.original_metadata.as_dictionary())
with self.subTest('already running'):
self.job.status.running = True
with self.assertRaises(RuntimeError):
self.job.signal = signal
def test_hs(self):
data = self.job.hs.load(os.path.join(self.project.path, '../../notebooks/experiment.emd'))
self.assertEqual(data[0], self.signal)
def test_static_workflow(self):
self.job.signal = self.signal
self.job.input.x = [[0, 50], [50, 50]]
self.job.input.y = [[10, 10], [0, 50]]
self.job.run()
self.assertEqual(len(self.job.output), 2)
with self.subTest('Output line 0'):
output = self.job.output[0]
self.assertEqual(output['line'], 0)
self.assertTrue(np.array_equal(output['x'], [0, 50]), msg=f"Expected {[0, 50]} but got {output['x']}.")
self.assertTrue(np.array_equal(output['y'], [10, 10]), msg=f"Expected {[10, 10]} but got {output['y']}.")
self.assertAlmostEqual(np.sum(output['data']), 1577323.2)
with self.subTest('Output line 1'):
output = self.job.output[1]
self.assertEqual(output['line'], 1)
self.assertTrue(np.array_equal(output['x'], [50, 50]), msg=f"Expected {[50, 50]} but got {output['x']}.")
self.assertTrue(np.array_equal(output['y'], [0, 50]), msg=f"Expected {[0, 50]} but got {output['y']}.")
self.assertAlmostEqual(np.sum(output['data']), 1509104.4)
def test_interactive_workflow(self):
self.job.signal = self.signal
self.job._useblit = False
fig = self.job.plot_signal()
fig.show()
self.job.add_line(x=[0, 50], y=[10, 10])
self.job.plot_line_profiles()
with self.subTest('Output line 0'):
output = self.job.output[0]
self.assertEqual(output['line'], 0)
self.assertTrue(np.array_equal(output['x'], [0, 50]), msg=f"Expected {[0, 50]} but got {output['x']}.")
self.assertTrue(np.array_equal(output['y'], [10, 10]), msg=f"Expected {[10, 10]} but got {output['y']}.")
self.assertAlmostEqual(np.sum(output['data']), 1577323.2)
self.job.add_line(x=[50, 50], y=[0, 50])
self.job.plot_line_profiles()
with self.subTest('Output line 1'):
output = self.job.output[1]
self.assertEqual(output['line'], 0)
self.assertTrue( | np.array_equal(output['x'], [0, 50]) | numpy.array_equal |
import os
import zipfile
import numpy as np
from PIL import Image
from deprecated import deprecated
from imgaug import augmenters as iaa
from torch.utils.data import Dataset
# from datasets.Loader import register_dataset
from datasets.utils.Util import generate_clip_from_image
from util import get_one_hot_vectors
from utils.Constants import COCO_ROOT
from utils.Resize import ResizeMode, resize
COCO_DEFAULT_PATH = "/globalwork/mahadevan/mywork/data/coco/"
NAME = "COCO"
COCO_SUPERCATEGORIES = ["outdoor", "food", "indoor", "appliance", "sports", "person", "animal",
"vehicle", "furniture", "accessory", "electronic", "kitchen"]
# @register_dataset(NAME)
@deprecated(reason="Use COCOv2 instead..")
class COCODataset(Dataset):
def __init__(self, root, is_train=False, crop_size=None,temporal_window=8, resize_mode=ResizeMode.FIXED_SIZE):
self.crop_size = crop_size
self.resize_mode = ResizeMode(resize_mode)
self.data_dir = root
self.is_archive = zipfile.is_zipfile(self.data_dir)
self.temporal_window = temporal_window
subset = "train" if is_train else "valid"
if subset == "train":
self.data_type = "train2014"
self.filter_crowd_images = True
self.min_box_size = 30
else:
self.data_type = "val2014"
self.filter_crowd_images = False
self.min_box_size = -1.0
self.restricted_image_category_list = ['person','bicycle','car','motorcycle','airplane','bus','train','truck',
'boat','bird','cat','dog','horse','sheep','cow','elephant','bear','zebra',
'giraffe','backpack', 'handbag', 'suitcase','frisbee','skis','snowboard',
'sports ball','kite','baseball bat', 'baseball glove', 'skateboard',
'surfboard','tennis racket', 'remote', 'cell phone']
if len(self.restricted_image_category_list) == 0:
self.restricted_image_category_list = None
self.restricted_annotations_category_list = ['person','bicycle','car','motorcycle','airplane','bus','train','truck',
'boat','bird','cat','dog','horse','sheep','cow','elephant','bear','zebra',
'giraffe','backpack', 'handbag', 'suitcase','frisbee','skis','snowboard',
'sports ball','kite','baseball bat', 'baseball glove', 'skateboard',
'surfboard','tennis racket', 'remote', 'cell phone']
if len(self.restricted_annotations_category_list) == 0:
self.restricted_annotations_category_list = None
self.exclude_image_category_list = []
if len(self.exclude_image_category_list) == 0:
self.exclude_image_category_list = None
self.exclude_annotations_category_list = []
if len(self.exclude_annotations_category_list) == 0:
self.exclude_annotations_category_list = None
# Use the minival split as done in https://github.com/rbgirshick/py-faster-rcnn/blob/master/data/README.md
self.annotation_file = '%s/annotations/instances_%s.json' % (self.data_dir, subset)
self.init_coco()
self.inputfile_lists = self.read_inputfile_lists()
def init_coco(self):
# only import this dependency on demand
import pycocotools.coco as coco
self.coco = coco.COCO(self.annotation_file)
ann_ids = self.coco.getAnnIds([])
self.anns = self.coco.loadAnns(ann_ids)
self.label_map = {k - 1: v for k, v in self.coco.cats.items()}
self.filename_to_anns = dict()
self.build_filename_to_anns_dict()
def build_filename_to_anns_dict(self):
for ann in self.anns:
img_id = ann['image_id']
img = self.coco.loadImgs(img_id)
file_name = img[0]['file_name']
if file_name in self.filename_to_anns:
self.filename_to_anns[file_name].append(ann)
else:
self.filename_to_anns[file_name] = [ann]
# self.filename_to_anns[file_name] = ann
self.filter_anns()
def filter_anns(self):
# exclude all images which contain a crowd
if self.filter_crowd_images:
self.filename_to_anns = {f: anns for f, anns in self.filename_to_anns.items()
if not any([an["iscrowd"] for an in anns])}
# filter annotations with too small boxes
if self.min_box_size != -1.0:
self.filename_to_anns = {f: [ann for ann in anns if ann["bbox"][2] >= self.min_box_size and ann["bbox"][3]
>= self.min_box_size] for f, anns in self.filename_to_anns.items()}
# remove annotations with crowd regions
self.filename_to_anns = {f: [ann for ann in anns if not ann["iscrowd"]]
for f, anns in self.filename_to_anns.items()}
# restrict images to contain considered categories
if self.restricted_image_category_list is not None:
print("filtering images to contain categories", self.restricted_image_category_list)
self.filename_to_anns = {f: anns for f, anns in self.filename_to_anns.items()
if any([self.label_map[ann["category_id"] - 1]["name"]
in self.restricted_image_category_list for ann in anns])}
for cat in self.restricted_image_category_list:
n_imgs_for_cat = sum([1 for anns in self.filename_to_anns.values() if
any([self.label_map[ann["category_id"] - 1]["name"] == cat for ann in anns])])
print("number of images containing", cat, ":", n_imgs_for_cat)
# exclude images that only contain objects in the given list
elif self.exclude_image_category_list is not None:
print("Excluding images categories", self.exclude_image_category_list)
self.filename_to_anns = {f: anns for f, anns in self.filename_to_anns.items()
if any([self.label_map[ann["category_id"] - 1]["name"]
not in self.exclude_image_category_list for ann in anns])}
# restrict annotations to considered categories
if self.restricted_annotations_category_list is not None:
print("filtering annotations to categories", self.restricted_annotations_category_list)
self.filename_to_anns = {f: [ann for ann in anns if self.label_map[ann["category_id"] - 1]["name"]
in self.restricted_annotations_category_list]
for f, anns in self.filename_to_anns.items()}
elif self.exclude_annotations_category_list is not None:
print("Excluding annotations for object categories", self.exclude_annotations_category_list)
self.filename_to_anns = {f: [ann for ann in anns if self.label_map[ann["category_id"] - 1]["name"]
not in self.exclude_annotations_category_list]
for f, anns in self.filename_to_anns.items()}
# filter out images without annotations
self.filename_to_anns = {f: anns for f, anns in self.filename_to_anns.items() if len(anns) > 0}
n_before = len(self.anns)
self.anns = []
for anns in self.filename_to_anns.values():
self.anns += anns
n_after = len(self.anns)
print("filtered annotations:", n_before, "->", n_after)
def load_image(self, img_filename):
path = img_filename.split('/')[-1]
img_dir = os.path.join(self.data_dir, "train2014") if path.split('_')[1] == "train2014" else \
os.path.join(self.data_dir, "val2014")
path = os.path.join(img_dir, path)
img = np.array(Image.open(path).convert('RGB'))
return img
def load_annotation(self, img_filename):
anns = self.filename_to_anns[img_filename.split("/")[-1]]
img = self.coco.loadImgs(anns[0]['image_id'])[0]
height = img['height']
width = img['width']
label = np.zeros((height, width, 1))
for ann in anns:
label[:, :, 0] += self.coco.annToMask(ann)[:, :]
if len(np.unique(label)) == 1:
print("GT contains only background.")
return (label != 0).astype(np.uint8)
def read_frame(self, index, instance_id=None):
# use a blend of both full random instance as well as the full object
raw_frames = self.load_image(self.inputfile_lists[index])
raw_masks = self.load_annotation(self.inputfile_lists[index])
tensors_resized = resize({"image":raw_frames, "mask":raw_masks[:, :, 0]}, self.resize_mode, self.crop_size)
return tensors_resized["image"], tensors_resized["mask"]
def read_inputfile_lists(self):
img_dir = '%s/%s/' % (self.data_dir, self.data_type)
# Filtering the image file names since some of them do not have annotations.
imgs = [os.path.join(img_dir,fn) for fn in self.filename_to_anns.keys()]
return imgs
def generate_clip(self, raw_frame, raw_mask):
clip_frames, clip_masks = generate_clip_from_image(raw_frame, raw_mask[...,None], self.temporal_window)
return clip_frames / 255.0, clip_masks[..., 0]
def set_video_id(self, video):
pass
def get_video_ids(self):
return [0]
def __len__(self):
return len(self.inputfile_lists)
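# Each item is a synthetic clip generated from a single COCO image/mask pair,
# zero-padded so that height and width are multiples of 32; the applied padding
# is returned in info['pad'] so it can be undone downstream.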
def __getitem__(self, index):
info = {}
info['name'] = "coco"
info['num_frames'] = len(self.inputfile_lists)
#info['shape'] = self.shape[sequence]
raw_frames, raw_masks = self.read_frame(index)
raw_frames, raw_masks = self.generate_clip(raw_frames, raw_masks)
raw_frames = np.transpose(raw_frames, (3, 0, 1, 2))
raw_masks = raw_masks[np.newaxis]
info['num_objects'] = len(np.unique(raw_masks))
# pad spatial dims so that height and width are divisible by 32
_,_, h, w = raw_masks.shape
new_h = h + 32 - h % 32 if h % 32 > 0 else h
new_w = w + 32 - w % 32 if w % 32 > 0 else w
# print(new_h, new_w)
lh, uh = (new_h - h) / 2, (new_h - h) / 2 + (new_h - h) % 2
lw, uw = (new_w - w) / 2, (new_w - w) / 2 + (new_w - w) % 2
lh, uh, lw, uw = int(lh), int(uh), int(lw), int(uw)
pad_masks = np.pad(raw_masks, ((0,0),(0,0),(lh, uh), (lw, uw)), mode='constant')
pad_frames = np.pad(raw_frames, ((0, 0),(0, 0),(lh, uh), (lw, uw)), mode='constant')
info['pad'] = ((lh, uh), (lw, uw))
return {'images': pad_frames.astype(np.float32), 'info': info,
'target': pad_masks, "proposals": pad_masks, "raw_proposals": pad_masks,
'raw_masks': pad_masks}
class COCOInstanceDataset(COCODataset):
def __init__(self, root, is_train=False, crop_size=None,temporal_window=8, resize_mode=ResizeMode.FIXED_SIZE):
super(COCOInstanceDataset, self).__init__(root, is_train=is_train, crop_size=crop_size,
temporal_window=temporal_window, resize_mode=resize_mode)
def build_filename_to_anns_dict(self):
for ann in self.anns:
ann_id = ann['id']
img_id = ann['image_id']
img = self.coco.loadImgs(img_id)
file_name = img[0]['file_name']
file_name = file_name + ":" + repr(img_id) + ":" + repr(ann_id)
if file_name in self.filename_to_anns:
print("Ignoring instance as an instance with the same id exists in filename_to_anns.")
else:
self.filename_to_anns[file_name] = [ann]
self.filter_anns()
def load_image(self, img_filename):
path = img_filename.split(':')[0]
path = path.split('/')[-1]
img_dir = os.path.join(self.data_dir, "train2014") if path.split('_')[1] == "train2014" else \
os.path.join(self.data_dir, "val2014")
path = os.path.join(img_dir, path)
img = np.array(Image.open(path).convert('RGB'))
return img
def load_annotation(self, img_filename):
ann = self.filename_to_anns[img_filename.split("/")[-1]]
img = self.coco.loadImgs(ann[0]['image_id'])[0]
height = img['height']
width = img['width']
label = np.zeros((height, width, 1))
label[:, :, 0] = self.coco.annToMask(ann[0])[:, :]
if len(np.unique(label)) == 1:
print("GT contains only background.")
return label.astype(np.uint8)
def __getitem__(self, item):
input_dict = super(COCOInstanceDataset, self).__getitem__(item)
input_dict['masks_guidance'] = input_dict['raw_masks'][:, 0]
return input_dict
class COCOEmbeddingDataset(COCODataset):
def __init__(self, root, is_train=False, crop_size=None,temporal_window=8, resize_mode=ResizeMode.FIXED_SIZE,
num_classes=2):
super(COCOEmbeddingDataset, self).__init__(root=root, is_train=is_train, crop_size=crop_size,
temporal_window=temporal_window, resize_mode=resize_mode)
self.num_classes = num_classes
def load_annotation(self, img_filename):
anns = self.filename_to_anns[img_filename.split("/")[-1]]
img = self.coco.loadImgs(anns[0]['image_id'])[0]
height = img['height']
width = img['width']
label = np.zeros((height, width, 1))
for ann in anns:
label[:, :, 0][self.coco.annToMask(ann) == 1] = (anns.index(ann) + 1)
if len(np.unique(label)) == 1:
print("GT contains only background.")
return label.astype(np.uint8)
def create_sem_seg_from_instances(self, img_filename, instances_masks):
anns = self.filename_to_anns[img_filename.split("/")[-1]]
label = | np.zeros_like(instances_masks) | numpy.zeros_like |
from atm import reference
import numpy as np
from utils import geo
def calc_atm_loss(freq_hz, gas_path_len_m=0, rain_path_len_m=0, cloud_path_len_m=0, atmosphere=None, pol_angle=0,
el_angle=0):
"""
Ref:
ITU-R P.676-11(09/2016) Attenuation by atmospheric gases
ITU-R P.840-6 (09/2013) Attenuation due to clouds and fog
ITU-R P.838-3 (03/2005) Specific attenuation model for rain for use in
prediction methods
Ported from MATLAB Code
<NAME>
16 March 2021
:param freq_hz: Frequency [Hz]
:param gas_path_len_m: Path length for gas loss [m] [default = 0]
:param rain_path_len_m: Path length for rain loss [m] [default = 0]
:param cloud_path_len_m: Path length for cloud loss [m] [default = 0]
:param atmosphere: atm.reference.Atmosphere object (if not provided, standard atmosphere will be generated)
:param pol_angle: Polarization angle [radians], 0 for Horizontal, pi/2 for Vertical, between 0 and pi for slant.
[default = 0]
:param el_angle: Elevation angle of the path under test [default = 0]
:return: loss along the path due to atmospheric absorption [dB, one-way]
"""
if atmosphere is None:
# Default atmosphere is the standard atmosphere at sea level, with no
# fog/clouds or rain.
atmosphere = reference.get_standard_atmosphere(0)
# Compute loss coefficients
if np.any(gas_path_len_m > 0):
coeff_ox, coeff_water = get_gas_loss_coeff(freq_hz, atmosphere.press, atmosphere.water_vapor_press,
atmosphere.temp)
coeff_gas = coeff_ox + coeff_water
else:
coeff_gas = 0
if np.any(rain_path_len_m > 0) and np.any(atmosphere.rainfall) > 0:
coeff_rain = get_rain_loss_coeff(freq_hz, pol_angle, el_angle, atmosphere.rainfall)
else:
coeff_rain = 0
if np.any(cloud_path_len_m > 0) and np.any(atmosphere.cloud_dens) > 0:
coeff_cloud = get_fog_loss_coeff(freq_hz, atmosphere.cloud_dens, atmosphere.temp)
else:
coeff_cloud = 0
# Compute loss components
loss_gass_db = coeff_gas * gas_path_len_m / 1.0e3
loss_rain_db = coeff_rain * rain_path_len_m / 1.0e3
loss_cloud_db = coeff_cloud * cloud_path_len_m / 1.0e3
return loss_gass_db + loss_rain_db + loss_cloud_db
def calc_zenith_loss(freq_hz, alt_start_m=0, zenith_angle_deg=0):
"""
# Computes the cumulative loss from alt_start [m] to zenith (100 km
# altitude), for the given frequencies (freq) in Hz and angle from zenith
# zenith_angle, in degrees.
#
# Does not account for refraction of the signal as it travels through the
# atmosphere; assumes a straight line propagation at the given zenith
# angle.
Ported from MATLAB Code
<NAME>
17 March 2021
:param freq_hz: Carrier frequency [Hz]
:param alt_start_m: Starting altitude [m]
:param zenith_angle_deg: Angle between line of sight and zenith (straight up) [deg]
:return zenith_loss: Cumulative loss to the edge of the atmosphere [dB]
:return zenith_loss_o: Cumulative loss due to dry air [dB]
:return zenith_loss_w: Cumulative loss due to water vapor [dB]
"""
# Add a new first dimension to all the inputs (if they're not scalar)
if np.size(freq_hz) > 1:
freq_hz = np.expand_dims(freq_hz, axis=0)
if np.size(alt_start_m) > 1:
alt_start_m = np.expand_dims(alt_start_m, axis=0)
if np.size(zenith_angle_deg) > 1:
zenith_angle_deg = np.expand_dims(zenith_angle_deg, axis=0)
# Make Altitude Layers
# From ITU-R P.676-11(12/2017), layers should be set at exponential intervals
num_layers = 922 # Used for ceiling of 100 km
layer_delta = .0001*np.exp(np.arange(num_layers)/100) # Layer thicknesses [km], eq 21
layer_delta = np.reshape(layer_delta, (num_layers, 1))
layer_top = np.cumsum(layer_delta) # [km]
layer_bottom = layer_top - layer_delta # [km]
layer_mid = (layer_top+layer_bottom)/2
# Drop layers below alt_start
alt_start_km = alt_start_m / 1e3
layer_mask = layer_top >= min(alt_start_km)
layer_bottom = layer_bottom[layer_mask]
layer_mid = layer_mid[layer_mask]
layer_top = layer_top[layer_mask]
# Lookup standard atmosphere for each band
atmosphere = reference.get_standard_atmosphere(layer_mid*1e3)
# Compute loss coefficient for each band
ao, aw = get_gas_loss_coeff(freq_hz, atmosphere.P, atmosphere.e, atmosphere.T)
# Account for off-nadir paths and partial layers
el_angle_deg = 90 - zenith_angle_deg
layer_delta_eff = geo.compute_slant_range(max(layer_bottom, alt_start_km), layer_top, el_angle_deg, True)
np.place(layer_delta_eff, layer_top <= alt_start_km, 0) # Set all layers below alt_start_km to zero
# Zenith Loss by Layer (loss to pass through each layer)
zenith_loss_by_layer_oxygen = ao*layer_delta_eff
zenith_loss_by_layer_water = aw*layer_delta_eff
# Cumulative Zenith Loss
# Loss from ground to the bottom of each layer
zenith_loss_o = np.squeeze(np.sum(zenith_loss_by_layer_oxygen, axis=0))
zenith_loss_w = np.squeeze(np.sum(zenith_loss_by_layer_water, axis=0))
zenith_loss = zenith_loss_o + zenith_loss_w
return zenith_loss, zenith_loss_o, zenith_loss_w
def get_rain_loss_coeff(freq_hz, pol_angle_rad, el_angle_rad, rainfall_rate):
"""
Computes the rain loss coefficient given a frequency, polarization,
elevation angle, and rainfall rate, according to ITU-R P.838-3, 2005.
Ported from MATLAB Code
<NAME>
16 March 2021
:param freq_hz: Propagation Frequency [Hz]
:param pol_angle_rad: Polarization angle [radians], 0 = Horizontal and pi/2 is Vertical. Slanted polarizations will
have a value 0 and pi.
:param el_angle_rad: Propagation path elevation angle [radians]
:param rainfall_rate: Rainfall rate [mm/hr]
:return: Loss coefficient [dB/km] caused by rain.
"""
# Add a new first dimension to all the inputs (if they're not scalar)
if np.size(freq_hz) > 1:
freq_hz = np.expand_dims(freq_hz, axis=0)
if np.size(pol_angle_rad) > 1:
pol_angle_rad = np.expand_dims(pol_angle_rad, axis=0)
if np.size(el_angle_rad) > 1:
el_angle_rad = np.expand_dims(el_angle_rad, axis=0)
if np.size(rainfall_rate) > 1:
rainfall_rate = np.expand_dims(rainfall_rate, axis=0)
# Coeffs for kh
a = | np.array([-5.3398, -0.35351, -0.23789, -0.94158]) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 11:36:21 2020
@author: <NAME>, https://github.com/zhaofenqiang
Contact: <EMAIL>
"""
import numpy as np
import itertools
from sklearn.neighbors import KDTree
from utils import get_neighs_order
import math, multiprocessing, os
abspath = os.path.abspath(os.path.dirname(__file__))
def get_latlon_img(bi_inter, feat):
inter_indices, inter_weights = bi_inter
width = int(np.sqrt(len(inter_indices)))
if len(feat.shape) == 1:
feat = feat[:,np.newaxis]
img = np.sum(np.multiply((feat[inter_indices.flatten()]).reshape((inter_indices.shape[0], inter_indices.shape[1], feat.shape[1])), np.repeat(inter_weights[:,:, np.newaxis], feat.shape[1], axis=-1)), axis=1)
img = img.reshape((width, width, feat.shape[1]))
return img
def isATriangle(neigh_orders, face):
"""
neigh_orders: int array, N x 7 (neighbour indices for each vertex)
face: int, 3 x 1
"""
neighs = neigh_orders[face[0],:]
if face[1] not in neighs or face[2] not in neighs:
return False
neighs = neigh_orders[face[1],:]
if face[2] not in neighs:
return False
return True
def projectVertex(vertex, v0, v1, v2):
normal = np.cross(v0 - v2, v1 - v2)
if np.linalg.norm(normal) == 0:
normal = v0
ratio = v0.dot(normal)/vertex.dot(normal)
vertex_proj = ratio * vertex
return vertex_proj
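# projectVertex maps `vertex` along the ray from the sphere centre onto the plane spanned by
# v0, v1 and v2 (falling back to v0 as the normal when the triangle is degenerate).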
def isOnSameSide(P, v0 , v1, v2):
"""
Check if P and v0 is on the same side
"""
edge_12 = v2 - v1
tmp0 = P - v1
tmp1 = v0 - v1
edge_12 = edge_12 / np.linalg.norm(edge_12)
tmp0 = tmp0 / np.linalg.norm(tmp0)
tmp1 = tmp1 / np.linalg.norm(tmp1)
vec1 = np.cross(edge_12, tmp0)
vec2 = np.cross(edge_12, tmp1)
return vec1.dot(vec2) >= 0
def isInTriangle(vertex, v0, v1, v2):
"""
Check if the vertices is in the triangle composed by v0 v1 v2
vertex: N*3, check N vertices at the same time
v0: (3,)
v1: (3,)
v2: (3,)
"""
# Project point onto the triangle plane
P = projectVertex(vertex, v0, v1, v2)
return isOnSameSide(P, v0, v1, v2) and isOnSameSide(P, v1, v2, v0) and isOnSameSide(P, v2, v0, v1)
def singleVertexInterpo_7(vertex, vertices, tree, neigh_orders, k=7):
if k > 15:
# print("use neaerest neighbor, k=", k)
_, top1_near_vertex_index = tree.query(vertex[np.newaxis,:], k=1)
inter_weight = np.array([1,0,0])
inter_indices = np.array([top1_near_vertex_index[0][0], top1_near_vertex_index[0][0], top1_near_vertex_index[0][0]])
return inter_indices, inter_weight
_, top7_near_vertex_index = tree.query(vertex[np.newaxis,:], k=k)
candi_faces = []
for t in itertools.combinations(np.squeeze(top7_near_vertex_index), 3):
tmp = np.asarray(t) # get the indices of the potential candidate triangles
if isATriangle(neigh_orders, tmp):
candi_faces.append(tmp)
if candi_faces:
candi_faces = np.asarray(candi_faces)
else:
if k > 20:
print("cannot find candidate faces, top k shoulb be larger, function recursion, current k =", k)
return singleVertexInterpo_7(vertex, vertices, tree, neigh_orders, k=k+5)
orig_vertex_1 = vertices[candi_faces[:,0]]
orig_vertex_2 = vertices[candi_faces[:,1]]
orig_vertex_3 = vertices[candi_faces[:,2]]
edge_12 = orig_vertex_2 - orig_vertex_1 # edge vectors from vertex 1 to 2
edge_13 = orig_vertex_3 - orig_vertex_1 # edge vectors from vertex 1 to 3
faces_normal = np.cross(edge_12, edge_13) # normals of all the faces
tmp = (np.linalg.norm(faces_normal, axis=1) == 0).nonzero()[0]
faces_normal[tmp] = orig_vertex_1[tmp]
faces_normal_norm = faces_normal / np.linalg.norm(faces_normal, axis=1)[:,np.newaxis]
# use formula p(x) = <p1,n>/<x,n> * x from the spherical demons paper to calculate the intersection with each face
tmp = np.sum(orig_vertex_1 * faces_normal_norm, axis=1) / np.sum(vertex * faces_normal_norm, axis=1)
ratio = tmp[:, np.newaxis]
P = ratio * vertex # intersection points
# find the triangle face that the intersection point lies in: if it lies inside,
# the areas of the 3 sub-triangles sum to the area of the whole face
area_BCP = np.linalg.norm(np.cross(orig_vertex_3-P, orig_vertex_2-P), axis=1)/2.0
area_ACP = np.linalg.norm(np.cross(orig_vertex_3-P, orig_vertex_1-P), axis=1)/2.0
area_ABP = np.linalg.norm(np.cross(orig_vertex_2-P, orig_vertex_1-P), axis=1)/2.0
area_ABC = np.linalg.norm(faces_normal, axis=1)/2.0
tmp = area_BCP + area_ACP + area_ABP - area_ABC
index = np.argmin(tmp)
if tmp[index] > 1e-06:
if k > 30:
print("candidate faces don't contain the correct one, top k shoulb be larger, function recursion, current k =", k)
return singleVertexInterpo_7(vertex, vertices, tree, neigh_orders, k=k+5)
w = np.array([area_BCP[index], area_ACP[index], area_ABP[index]])
if w.sum() == 0:
_, top1_near_vertex_index = tree.query(vertex[np.newaxis,:], k=1)
inter_weight = np.array([1,0,0])
inter_indices = np.array([top1_near_vertex_index[0][0], top1_near_vertex_index[0][0], top1_near_vertex_index[0][0]])
else:
inter_weight = w / w.sum()
inter_indices = candi_faces[index]
# print("tmp[index] = ", tmp[index])
# if isInTriangle(vertex, vertices[candi_faces[index][0]], vertices[candi_faces[index][1]], vertices[candi_faces[index][2]]):
# assert False, "threshold should be smaller"
# else:
return inter_indices, inter_weight
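# Summary of singleVertexInterpo_7 (as read from the code above): the k nearest template vertices
# are combined into candidate triangles, the query vertex is projected onto each candidate face
# along the ray from the origin, and the face whose three sub-triangle areas sum closest to its
# own area is selected; those sub-areas, normalized, become the barycentric interpolation weights.
# If no valid face is found, k is increased by 5 and the search is retried recursively.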
def singleVertexInterpo(vertex, vertices, tree, neigh_orders, feat):
"""
Interpolate feat at the given position on the sphere: find the three enclosing template vertices and combine feat with their barycentric weights.
"""
_, top3_near_vertex_index = tree.query(vertex[np.newaxis,:], k=3)
top3_near_vertex_index = np.squeeze(top3_near_vertex_index)
if isATriangle(neigh_orders, top3_near_vertex_index):
v0 = vertices[top3_near_vertex_index[0]]
v1 = vertices[top3_near_vertex_index[1]]
v2 = vertices[top3_near_vertex_index[2]]
normal = np.cross(v1-v2, v0-v2)
vertex_proj = v0.dot(normal)/vertex.dot(normal) * vertex
area_BCP = np.linalg.norm(np.cross(v2-vertex_proj, v1-vertex_proj))/2.0
area_ACP = np.linalg.norm(np.cross(v2-vertex_proj, v0-vertex_proj))/2.0
area_ABP = np.linalg.norm(np.cross(v1-vertex_proj, v0-vertex_proj))/2.0
area_ABC = np.linalg.norm(normal)/2.0
if area_BCP + area_ACP + area_ABP - area_ABC > 1e-5:
inter_indices, inter_weight = singleVertexInterpo_7(vertex, vertices, tree, neigh_orders)
else:
inter_weight = np.array([area_BCP, area_ACP, area_ABP])
inter_weight = inter_weight / inter_weight.sum()
inter_indices = top3_near_vertex_index
else:
inter_indices, inter_weight = singleVertexInterpo_7(vertex, vertices, tree, neigh_orders)
# print(inter_weight.shape)
return np.sum(np.multiply(feat[inter_indices], np.repeat(inter_weight[:,np.newaxis], feat.shape[1], axis=1)), axis=0)
def multiVertexInterpo(vertexs, vertices, tree, neigh_orders, feat):
feat_inter = np.zeros((vertexs.shape[0], feat.shape[1]))
for i in range(vertexs.shape[0]):
feat_inter[i,:] = singleVertexInterpo(vertexs[i,:], vertices, tree, neigh_orders, feat)
return feat_inter
def resampleStdSphereSurf(n_curr, n_next, feat, upsample_neighbors):
assert len(feat) == n_curr, "feat length not consistent!"
assert n_next == n_curr*4-6, "n_next == n_curr*4-6, error"
feat_inter = np.zeros((n_next, feat.shape[1]))
feat_inter[0:n_curr, :] = feat
feat_inter[n_curr:, :] = feat[upsample_neighbors].reshape(n_next-n_curr, 2, feat.shape[1]).mean(1)
return feat_inter
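# Example (hypothetical sizes): upsampling per-vertex features from a standard 2562-vertex
# icosphere to its 10242-vertex subdivision (10242 == 2562*4 - 6). upsample_neighbors is assumed
# to list, for every new midpoint vertex, the indices of its two parent vertices, so new values
# are simply the mean of the parents:
#   feat_10242 = resampleStdSphereSurf(2562, 10242, feat_2562, upsample_neighbors)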
def resampleSphereSurf(vertices_fix, vertices_inter, feat, faces=None, std=False, upsample_neighbors=None, neigh_orders=None):
"""
resample sphere surface
Parameters
----------
vertices_fix : N*3, numpy array
DESCRIPTION.
vertices_inter : unknown*3, numpy array, points to be interpolated
DESCRIPTION.
feat : N*D, features to be interpolated
DESCRIPTION.
faces : N*4, numpy array, the first column should be all 3,
i.e. the faces array as directly read using read_vtk. The default is None.
std : bool
standard sphere interpolation, e.g., interpolate 10242 from 2562. The default is False.
upsample_neighbors : int array, optional
the two parent-vertex indices of every new vertex; only used when std is True. The default is None.
neigh_orders : int array, optional
precomputed neighbor table (N x 7, possibly flattened); built from faces or loaded from file when None. The default is None.
Returns
-------
feat_inter : numpy array, features interpolated at vertices_inter (squeezed).
"""
# template = read_vtk('/media/fenqiang/DATA/unc/Data/registration/presentation/regis_sulc_2562_3d_smooth0p33_phiconsis1_3model/training_10242/MNBCP107842_593.lh.SphereSurf.Orig.sphere.resampled.642.DL.origin_3.phi_resampled.2562.moved.sucu_resampled.2562.DL.origin_3.phi_resampled.10242.moved.vtk')
# vertices_fix = template['vertices']
# feat = template['sulc']
# vertices_inter = read_vtk('/media/fenqiang/DATA/unc/Data/Template/Atlas-20200107-newsulc/18/18.lh.SphereSurf.10242.rotated_2.vtk')
# vertices_inter = vertices_inter['vertices']
assert vertices_fix.shape[0] == feat.shape[0], "vertices.shape[0] == feat.shape[0], error"
assert vertices_fix.shape[1] == 3, "vertices size not right"
vertices_fix = vertices_fix.astype(np.float64)
vertices_inter = vertices_inter.astype(np.float64)
feat = feat.astype(np.float64)
vertices_fix = vertices_fix / np.linalg.norm(vertices_fix, axis=1)[:,np.newaxis] # normalize to 1
vertices_inter = vertices_inter / np.linalg.norm(vertices_inter, axis=1)[:,np.newaxis] # normalize to 1
if len(feat.shape) == 1:
feat = feat[:,np.newaxis]
if std:
assert upsample_neighbors is not None, " upsample_neighbors is None"
return resampleStdSphereSurf(len(vertices_fix), len(vertices_inter), feat, upsample_neighbors)
if neigh_orders is None:
if faces is not None:
assert faces.shape[1] == 4, "faces shape is wrong, should be N*4"
assert (faces[:,0] == 3).sum() == faces.shape[0], "the first column of faces should be all 3"
faces = faces[:,[1,2, 3]]
neigh_orders = np.zeros((vertices_fix.shape[0],30), dtype=np.int64)-1
for i in range(faces.shape[0]):
if faces[i,1] not in neigh_orders[faces[i,0]]:
neigh_orders[faces[i,0], np.where(neigh_orders[faces[i,0]] == -1)[0][0]] = faces[i,1]
if faces[i,2] not in neigh_orders[faces[i,0]]:
neigh_orders[faces[i,0], np.where(neigh_orders[faces[i,0]] == -1)[0][0]] = faces[i,2]
if faces[i,0] not in neigh_orders[faces[i,1]]:
neigh_orders[faces[i,1], np.where(neigh_orders[faces[i,1]] == -1)[0][0]] = faces[i,0]
if faces[i,2] not in neigh_orders[faces[i,1]]:
neigh_orders[faces[i,1], np.where(neigh_orders[faces[i,1]] == -1)[0][0]] = faces[i,2]
if faces[i,1] not in neigh_orders[faces[i,2]]:
neigh_orders[faces[i,2], np.where(neigh_orders[faces[i,2]] == -1)[0][0]] = faces[i,1]
if faces[i,0] not in neigh_orders[faces[i,2]]:
neigh_orders[faces[i,2], np.where(neigh_orders[faces[i,2]] == -1)[0][0]] = faces[i,0]
else:
neigh_orders = get_neighs_order(abspath+'/neigh_indices/adj_mat_order_'+ str(vertices_fix.shape[0]) +'.mat')
neigh_orders = neigh_orders.reshape(vertices_fix.shape[0], 7)
else:
neigh_orders = neigh_orders.reshape(vertices_fix.shape[0], 7)
feat_inter = np.zeros((vertices_inter.shape[0], feat.shape[1]))
tree = KDTree(vertices_fix, leaf_size=10) # build kdtree
""" Single process, single thread: 163842: 54.5s, 40962: 12.7s, 10242: 3.2s, 2562: 0.8s """
# for i in range(vertices_inter.shape[0]):
# print(i)
# feat_inter[i,:] = singleVertexInterpo(vertices_inter[i,:], vertices_fix, tree, neigh_orders, feat)
""" multiple processes method: 163842: 9.6s, 40962: 2.8s, 10242: 1.0s, 2562: 0.28s """
pool = multiprocessing.Pool()
cpus = multiprocessing.cpu_count()
vertexs_num_per_cpu = math.ceil(vertices_inter.shape[0]/cpus)
results = []
for i in range(cpus):
results.append(pool.apply_async(multiVertexInterpo, args=(vertices_inter[i*vertexs_num_per_cpu:(i+1)*vertexs_num_per_cpu,:], vertices_fix, tree, neigh_orders, feat,)))
pool.close()
pool.join()
for i in range(cpus):
feat_inter[i*vertexs_num_per_cpu:(i+1)*vertexs_num_per_cpu,:] = results[i].get()
return np.squeeze(feat_inter)
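# Example (hypothetical file names): interpolating per-vertex 'sulc' values from a fixed sphere
# onto a differently sampled target sphere, using the faces read with the same read_vtk reader
# assumed by the commented-out snippet above:
#   surf = read_vtk('fixed_sphere.vtk')
#   target = read_vtk('target_sphere.vtk')
#   sulc_on_target = resampleSphereSurf(surf['vertices'], target['vertices'], surf['sulc'],
#                                       faces=surf['faces'])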
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 14:27:01 2021
@author: Hatlab_3
"""
# import easygui
from plottr.apps.autoplot import autoplotDDH5, script, main
from plottr.data.datadict_storage import all_datadicts_from_hdf5
import matplotlib.pyplot as plt
import numpy as np
from data_processing.Helper_Functions import get_name_from_path, shift_array_relative_to_middle, log_normalize_to_row, select_closest_to_target, log_normalize_up_to_row
import matplotlib.colors as color
from scipy.ndimage import gaussian_filter
#Get Taco (irregular imshow)
def make_tacos(bias_current, gen_frequency, gen_power, calculated_gain, replace_nan = False, vmin = 15, vmax = 25, fancy = False, target = 20):
fig, ax = plt.subplots(1,1)
if replace_nan:
calculated_gain[np.isnan(calculated_gain)] = 0
img = ax.scatter(gen_frequency/1e6, gen_power, c = calculated_gain, cmap = 'seismic', vmin = vmin, vmax = vmax, zorder = 1)
cb = fig.colorbar(img, ax = ax)
unique_freqs = np.unique(gen_frequency)
best_powers = [select_closest_to_target(gen_power[gen_frequency == f], calculated_gain[gen_frequency == f], target) for f in unique_freqs]
ax.plot(unique_freqs/1e6, best_powers, 'k-', lw = 2)
return fig, ax, cb
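# Example (hypothetical arrays): plotting a gain "taco" from flattened sweep results, where each
# array holds one value per (generator frequency, generator power) point and the black trace marks
# the power closest to the target gain at every frequency:
#   fig, ax, cb = make_tacos(bias_current, gen_frequency, gen_power, calculated_gain,
#                            vmin=15, vmax=25, target=20)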
def make_gain_profiles(filepath, replace_nan = False, vmin = 15, vmax = 25, angles = [45, 45]):
gainDict = all_datadicts_from_hdf5(filepath)['data']
calc_gain = gainDict.extract('calculated_gain').data_vals('calculated_gain')
gen_frequency_calc = gainDict.extract('calculated_gain').data_vals('gen_frequency')
gen_power_calc = gainDict.extract('calculated_gain').data_vals('gen_power')
unique_freqs = np.unique(gen_frequency_calc)
best_powers = [select_closest_to_target(gen_power_calc[gen_frequency_calc == f], calc_gain[gen_frequency_calc == f], 20) for f in unique_freqs]
gain_traces = gainDict.extract('gain_trace').data_vals('gain_trace')
gen_power = gainDict.extract('gain_trace').data_vals('gen_power')
vna_freqs = gainDict.extract('gain_trace').data_vals('vna_frequency')
gen_frequency_raw = gainDict.extract('gain_trace').data_vals('gen_frequency')
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
ax.azim = angles[0]
ax.elev = angles[1]
for best_power in best_powers:
gp_filt = np.isclose(gen_power, best_power, atol = 0.05)
f_val= np.round(np.average(gen_frequency_raw[gp_filt])/1e6, 0)
ax.plot(f_val*np.ones(np.size(vna_freqs[gp_filt][0])), vna_freqs[gp_filt][0]/1e6, gain_traces[gp_filt][0])
return fig, ax
def make_gain_surface(filepath, replace_nan = False, vmin = 15, vmax = 25, angles = [45, 45]):
gainDict = all_datadicts_from_hdf5(filepath)['data']
calc_gain = gainDict.extract('calculated_gain').data_vals('calculated_gain')
gen_frequency_calc = gainDict.extract('calculated_gain').data_vals('gen_frequency')
gen_power_calc = gainDict.extract('calculated_gain').data_vals('gen_power')
unique_freqs = np.unique(gen_frequency_calc)
best_powers = [select_closest_to_target(gen_power_calc[gen_frequency_calc == f], calc_gain[gen_frequency_calc == f], 20) for f in unique_freqs]
gain_traces = gainDict.extract('gain_trace').data_vals('gain_trace')
gen_power = gainDict.extract('gain_trace').data_vals('gen_power')
vna_freqs = gainDict.extract('gain_trace').data_vals('vna_frequency')
gen_frequency_raw = gainDict.extract('gain_trace').data_vals('gen_frequency')
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
ax.azim = angles[0]
ax.elev = angles[1]
gen_f_array = []
sig_f_array = []
gain_array = []
for best_power in best_powers:
gp_filt = gen_power == best_power
f_val= np.round(np.average(gen_frequency_raw[gp_filt])/1e6, 0)
gen_f_array.append(f_val*np.ones(np.size(vna_freqs[gp_filt][0])))
sig_f_array.append(vna_freqs[gp_filt][0]/1e6)
gain_array.append(gain_traces[gp_filt][0])
gen_f_array = np.array(gen_f_array)
sig_f_array = np.array(sig_f_array)
gain_array = np.array(gain_array)
ax.plot_surface(gen_f_array, sig_f_array, gain_array, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
return fig, ax
def make_sat_img_plot(sat_bias_current, sat_gen_freq, sat_vna_powers, sat_gain, levels = [-2,-1.5,-1, -0.25, 0.25,1, 1.5,2], norm_power = -40, x_val = None, filter_window = 0, vmin = -1, vmax = 1):
y_norm_val = norm_power #Signal power at which to normalize the rest of the plot to
# print(f"Normalized to VNA_Power = {y_norm_val}dB")
fig, ax = plt.subplots()
colors = [color.hex2color('#4444FF'), color.hex2color('#FFFFFF'), color.hex2color('#888888'), color.hex2color('#888888'),color.hex2color('#FFFFFF'), color.hex2color('#FF4444')]
_cmap = color.LinearSegmentedColormap.from_list('my_cmap', colors)
smoothed_normed_data = log_normalize_to_row(sat_gen_freq, sat_vna_powers[0], gaussian_filter(sat_gain.T, (filter_window,0)), y_norm_val= y_norm_val)
img = ax.pcolormesh(sat_gen_freq/1e6, sat_vna_powers[0],
smoothed_normed_data,
# gaussian_filter(sat_gain.T, 5),
cmap = _cmap,
vmin = vmin, vmax = vmax)
#getting saturation points
sat_powers = []
for col in smoothed_normed_data.T:
buffer = np.size(col[sat_vna_powers[0]<= y_norm_val])
#append locations of +1dB and -1dB points
try:
pos_loc = buffer+np.min(np.where(np.isclose(col[sat_vna_powers[0]>y_norm_val], 1, atol = 1e-2))[0])
except ValueError:
pos_loc = np.size(col)-1
try:
neg_loc = buffer+np.min(np.where(np.isclose(col[sat_vna_powers[0]>y_norm_val], -1, atol = 1e-2))[0])
except ValueError:
neg_loc = np.size(col)-1
# print(f"Pos: {pos_loc} \nNeg: {neg_loc}")
loc_arr = np.array([pos_loc, neg_loc])
loc_arr = np.floor(loc_arr[np.logical_not(np.isnan(loc_arr))]).astype(int)
# print(loc_arr)
loc = np.min(loc_arr)
sat_powers.append(sat_vna_powers[0][loc])
ax.plot((np.array(sat_gen_freq+(sat_gen_freq[1]-sat_gen_freq[0])/2)/1e6)[0:-1], sat_powers[0:-1], 'k o')
#plot the best one as a star
max_loc = np.where(sat_powers[0:-1] == np.max(sat_powers[0:-1]))[0][0]
# print(max_loc)
plt.plot((np.array(sat_gen_freq+(sat_gen_freq[1]-sat_gen_freq[0])/2)/1e6)[max_loc], sat_powers[max_loc], 'r*', markersize = 5)
ax.hlines(y = y_norm_val, xmin = np.min(sat_gen_freq/1e6), xmax = np.max(sat_gen_freq/1e6), color = 'b', lw = 2)
return fig, ax, img
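# Note (as read from the code above): make_sat_img_plot normalizes each gain column to its value
# at norm_power, so the plotted quantity is gain deviation in dB versus signal power; the black
# dots mark the lowest signal power at which the deviation first reaches +/-1 dB (the saturation
# point), and the red star marks the generator frequency with the highest saturation power.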
def superTACO_Lines(filepaths, angles = [45,45], quanta_size = None, quant_offset = None):
#step 1: assemble best powers into bias_currents vs. (gen_freq vs. best_powers) array
#i.e. for each of the n bias currents there is a gen_freq array
#and for each m(n) gen_freq there is one gen_power that is best (could be NaN if garbage)
#feed this into n mplot3d plot commands, each with its own color and legend label
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.azim = angles[0]
ax.elev = angles[1]
bias_currents = []
best_gen_frequencies = []
best_gen_powers = []
for gain_filepath in filepaths:
#extract the best gen powers
gain_dicts = all_datadicts_from_hdf5(gain_filepath)
gainDict = gain_dicts['data']
gain_data = gainDict.extract('calculated_gain')
[bias_current, gen_frequency, gen_power, calc_gain] = [gain_data.data_vals('bias_current'),
gain_data.data_vals('gen_frequency'),
gain_data.data_vals('gen_power'),
gain_data.data_vals('calculated_gain')
]
for current in np.unique(bias_current): #could be multiple bias currents in one single TACO datafile
bias_currents.append(current)
print(f"{gain_filepath}\nCURRENT: {current*1000}mA")
filt = bias_current == current
cfreqs = gen_frequency[filt]
cpowers = gen_power[filt]
unique_freqs = np.unique(cfreqs)
cgain = calc_gain[filt]
best_powers = [select_closest_to_target(cpowers[cfreqs == f], cgain[cfreqs == f], 20) for f in unique_freqs]
#convert freqs to detuning from the frequency of the best (lowest) gen power
best_power = np.min(best_powers)
best_gen_powers.append(best_power)
best_freq = np.average(unique_freqs[np.where(best_powers == best_power)])
best_gen_frequencies.append(best_freq)
adjusted_freqs = unique_freqs - best_freq
if quanta_size is not None:
quant_frac = np.round((current-quant_offset)/quanta_size, 3)
import os
import numpy as np
import wx
import wx.lib.buttons as buttons
import dateutil # required by matplotlib
#from matplotlib import pyplot as plt
import matplotlib
matplotlib.use('Agg') # Important for Windows version of installer
from matplotlib import rc as matplotlib_rc
try:
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
except Exception as e:
print('')
print('Error: problem importing `matplotlib.backends.backend_wxagg`.')
import platform
if platform.system()=='Darwin':
print('')
print('pyDatView help:')
print(' This is a typical issue on MacOS, most likely you are')
print(' using the native MacOS python with the native matplotlib')
print(' library, which is incompatible with `wxPython`.')
print('')
print(' You can solve this by either:')
print(' - using python3, and pip3 e.g. installing it with brew')
print(' - using a virtual environment with python 2 or 3')
print(' - using anaconda with python 2 or 3');
print('')
import sys
sys.exit(1)
else:
raise e
from matplotlib.figure import Figure
from matplotlib.pyplot import rcParams as pyplot_rc
from matplotlib import font_manager
from pandas.plotting import register_matplotlib_converters
import gc
from .common import * # unique, CHAR
from .plotdata import PlotData, compareMultiplePD
from .GUICommon import *
from .GUIToolBox import MyMultiCursor, MyNavigationToolbar2Wx, TBAddTool, TBAddCheckTool
from .GUIMeasure import GUIMeasure
from . import icons
font = {'size' : 8}
matplotlib_rc('font', **font)
pyplot_rc['agg.path.chunksize'] = 20000
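# Module overview (descriptive note): the small wx.Panel subclasses below (PDFCtrlPanel,
# MinMaxPanel, CompCtrlPanel, SpectralCtrlPanel, PlotTypePanel, EstheticsPanel) each expose the
# options of one plot mode and call back into their parent PlotPanel, which owns the matplotlib
# Figure, transforms the selected PlotData (PDF/FFT/MinMax/Compare) and performs the drawing.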
class PDFCtrlPanel(wx.Panel):
def __init__(self, parent):
super(PDFCtrlPanel,self).__init__(parent)
self.parent = parent
lb = wx.StaticText( self, -1, 'Number of bins:')
self.scBins = wx.SpinCtrl(self, value='50',size=wx.Size(70,-1))
self.scBins.SetRange(3, 10000)
self.cbSmooth = wx.CheckBox(self, -1, 'Smooth',(10,10))
self.cbSmooth.SetValue(False)
dummy_sizer = wx.BoxSizer(wx.HORIZONTAL)
dummy_sizer.Add(lb ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.scBins ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.cbSmooth ,0, flag = wx.CENTER|wx.LEFT,border = 6)
self.SetSizer(dummy_sizer)
self.Bind(wx.EVT_TEXT , self.onPDFOptionChange, self.scBins)
self.Bind(wx.EVT_CHECKBOX, self.onPDFOptionChange)
self.Hide()
def onPDFOptionChange(self,event=None):
self.parent.load_and_draw(); # DATA HAS CHANGED
class MinMaxPanel(wx.Panel):
def __init__(self, parent):
super(MinMaxPanel,self).__init__(parent)
self.parent = parent
self.cbxMinMax = wx.CheckBox(self, -1, 'xMinMax',(10,10))
self.cbyMinMax = wx.CheckBox(self, -1, 'yMinMax',(10,10))
self.cbxMinMax.SetValue(False)
self.cbyMinMax.SetValue(True)
dummy_sizer = wx.BoxSizer(wx.HORIZONTAL)
dummy_sizer.Add(self.cbxMinMax ,0, flag=wx.CENTER|wx.LEFT, border = 1)
dummy_sizer.Add(self.cbyMinMax ,0, flag=wx.CENTER|wx.LEFT, border = 1)
self.SetSizer(dummy_sizer)
self.Bind(wx.EVT_CHECKBOX, self.onMinMaxChange)
self.Hide()
def onMinMaxChange(self,event=None):
self.parent.load_and_draw(); # DATA HAS CHANGED
class CompCtrlPanel(wx.Panel):
def __init__(self, parent):
super(CompCtrlPanel,self).__init__(parent)
self.parent = parent
lblList = ['Relative', '|Relative|','Ratio','Absolute','Y-Y']
self.rbType = wx.RadioBox(self, label = 'Type', choices = lblList,
majorDimension = 1, style = wx.RA_SPECIFY_ROWS)
dummy_sizer = wx.BoxSizer(wx.HORIZONTAL)
dummy_sizer.Add(self.rbType ,0, flag = wx.CENTER|wx.LEFT,border = 1)
self.SetSizer(dummy_sizer)
self.rbType.Bind(wx.EVT_RADIOBOX,self.onTypeChange)
self.Hide()
def onTypeChange(self,e):
self.parent.load_and_draw(); # DATA HAS CHANGED
class SpectralCtrlPanel(wx.Panel):
def __init__(self, parent):
super(SpectralCtrlPanel,self).__init__(parent)
self.parent = parent
# --- GUI widgets
lb = wx.StaticText( self, -1, 'Type:')
self.cbType = wx.ComboBox(self, choices=['PSD','f x PSD','Amplitude'] , style=wx.CB_READONLY)
self.cbType.SetSelection(0)
lbAveraging = wx.StaticText( self, -1, 'Avg.:')
self.cbAveraging = wx.ComboBox(self, choices=['None','Welch'] , style=wx.CB_READONLY)
self.cbAveraging.SetSelection(1)
self.lbAveragingMethod = wx.StaticText( self, -1, 'Window:')
self.cbAveragingMethod = wx.ComboBox(self, choices=['Hamming','Hann','Rectangular'] , style=wx.CB_READONLY)
self.cbAveragingMethod.SetSelection(0)
self.lbP2 = wx.StaticText( self, -1, '2^n:')
self.scP2 = wx.SpinCtrl(self, value='11',size=wx.Size(40,-1))
self.lbWinLength = wx.StaticText( self, -1, '(2048) ')
self.scP2.SetRange(3, 19)
lbMaxFreq = wx.StaticText( self, -1, 'Xlim:')
self.tMaxFreq = wx.TextCtrl(self,size = (30,-1),style=wx.TE_PROCESS_ENTER)
self.tMaxFreq.SetValue("-1")
self.cbDetrend = wx.CheckBox(self, -1, 'Detrend',(10,10))
lbX = wx.StaticText( self, -1, 'x:')
self.cbTypeX = wx.ComboBox(self, choices=['1/x','2pi/x','x'] , style=wx.CB_READONLY)
self.cbTypeX.SetSelection(0)
# Layout
dummy_sizer = wx.BoxSizer(wx.HORIZONTAL)
dummy_sizer.Add(lb ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.cbType ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(lbAveraging ,0, flag = wx.CENTER|wx.LEFT,border = 6)
dummy_sizer.Add(self.cbAveraging ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.lbAveragingMethod,0, flag = wx.CENTER|wx.LEFT,border = 6)
dummy_sizer.Add(self.cbAveragingMethod,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.lbP2 ,0, flag = wx.CENTER|wx.LEFT,border = 6)
dummy_sizer.Add(self.scP2 ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.lbWinLength ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(lbMaxFreq ,0, flag = wx.CENTER|wx.LEFT,border = 6)
dummy_sizer.Add(self.tMaxFreq ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(lbX ,0, flag = wx.CENTER|wx.LEFT,border = 6)
dummy_sizer.Add(self.cbTypeX ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.cbDetrend ,0, flag = wx.CENTER|wx.LEFT,border = 7)
self.SetSizer(dummy_sizer)
self.Bind(wx.EVT_COMBOBOX ,self.onSpecCtrlChange)
self.Bind(wx.EVT_TEXT ,self.onP2ChangeText ,self.scP2 )
self.Bind(wx.EVT_TEXT_ENTER,self.onXlimChange ,self.tMaxFreq )
self.Bind(wx.EVT_CHECKBOX ,self.onDetrendChange ,self.cbDetrend)
self.Hide()
def onXlimChange(self,event=None):
self.parent.redraw_same_data();
def onSpecCtrlChange(self,event=None):
self.parent.load_and_draw() # Data changes
def onDetrendChange(self,event=None):
self.parent.load_and_draw() # Data changes
def onP2ChangeText(self,event=None):
nExp=self.scP2.GetValue()
self.updateP2(nExp)
self.parent.load_and_draw() # Data changes
def updateP2(self,P2):
self.lbWinLength.SetLabel("({})".format(2**P2))
class PlotTypePanel(wx.Panel):
def __init__(self, parent):
# Superclass constructor
super(PlotTypePanel,self).__init__(parent)
#self.SetBackgroundColour('yellow')
# data
self.parent = parent
# --- Ctrl Panel
self.cbRegular = wx.RadioButton(self, -1, 'Regular',style=wx.RB_GROUP)
self.cbPDF = wx.RadioButton(self, -1, 'PDF' , )
self.cbFFT = wx.RadioButton(self, -1, 'FFT' , )
self.cbMinMax = wx.RadioButton(self, -1, 'MinMax' , )
self.cbCompare = wx.RadioButton(self, -1, 'Compare', )
self.cbRegular.SetValue(True)
self.Bind(wx.EVT_RADIOBUTTON, self.pdf_select , self.cbPDF )
self.Bind(wx.EVT_RADIOBUTTON, self.fft_select , self.cbFFT )
self.Bind(wx.EVT_RADIOBUTTON, self.minmax_select , self.cbMinMax )
self.Bind(wx.EVT_RADIOBUTTON, self.compare_select, self.cbCompare)
self.Bind(wx.EVT_RADIOBUTTON, self.regular_select, self.cbRegular)
# LAYOUT
cb_sizer = wx.FlexGridSizer(rows=5, cols=1, hgap=0, vgap=0)
cb_sizer.Add(self.cbRegular , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbPDF , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbFFT , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbMinMax , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbCompare , 0, flag=wx.ALL, border=1)
self.SetSizer(cb_sizer)
def plotType(self):
plotType='Regular'
if self.cbMinMax.GetValue():
plotType='MinMax'
elif self.cbPDF.GetValue():
plotType='PDF'
elif self.cbFFT.GetValue():
plotType='FFT'
elif self.cbCompare.GetValue():
plotType='Compare'
return plotType
def regular_select(self, event=None):
self.clear_measures()
self.parent.cbLogY.SetValue(False)
#
self.parent.spcPanel.Hide();
self.parent.pdfPanel.Hide();
self.parent.cmpPanel.Hide();
self.parent.mmxPanel.Hide();
self.parent.slEsth.Hide();
self.parent.plotsizer.Layout()
#
self.parent.load_and_draw() # Data changes
def compare_select(self, event=None):
self.clear_measures()
self.parent.cbLogY.SetValue(False)
self.parent.show_hide(self.parent.cmpPanel, self.cbCompare.GetValue())
self.parent.spcPanel.Hide();
self.parent.pdfPanel.Hide();
self.parent.mmxPanel.Hide();
self.parent.plotsizer.Layout()
self.parent.load_and_draw() # Data changes
def fft_select(self, event=None):
self.clear_measures()
self.parent.show_hide(self.parent.spcPanel, self.cbFFT.GetValue())
self.parent.cbLogY.SetValue(self.cbFFT.GetValue())
self.parent.pdfPanel.Hide();
self.parent.mmxPanel.Hide();
self.parent.plotsizer.Layout()
self.parent.load_and_draw() # Data changes
def pdf_select(self, event=None):
self.clear_measures()
self.parent.cbLogX.SetValue(False)
self.parent.cbLogY.SetValue(False)
self.parent.show_hide(self.parent.pdfPanel, self.cbPDF.GetValue())
self.parent.spcPanel.Hide();
self.parent.cmpPanel.Hide();
self.parent.mmxPanel.Hide();
self.parent.plotsizer.Layout()
self.parent.load_and_draw() # Data changes
def minmax_select(self, event):
self.clear_measures()
self.parent.cbLogY.SetValue(False)
self.parent.show_hide(self.parent.mmxPanel, self.cbMinMax.GetValue())
self.parent.spcPanel.Hide();
self.parent.pdfPanel.Hide();
self.parent.cmpPanel.Hide();
self.parent.plotsizer.Layout()
self.parent.load_and_draw() # Data changes
def clear_measures(self):
self.parent.rightMeasure.clear()
self.parent.leftMeasure.clear()
self.parent.lbDeltaX.SetLabel('')
self.parent.lbDeltaY.SetLabel('')
class EstheticsPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.parent=parent
#self.SetBackgroundColour('red')
lbFont = wx.StaticText( self, -1, 'Font:')
self.cbFont = wx.ComboBox(self, choices=['6','7','8','9','10','11','12','13','14','15','16','17','18'] , style=wx.CB_READONLY)
self.cbFont.SetSelection(2)
# NOTE: we don't offer "best" since best is slow
lbLegend = wx.StaticText( self, -1, 'Legend:')
self.cbLegend = wx.ComboBox(self, choices=['None','Upper right','Upper left','Lower left','Lower right','Right','Center left','Center right','Lower center','Upper center','Center'] , style=wx.CB_READONLY)
self.cbLegend.SetSelection(1)
lbLgdFont = wx.StaticText( self, -1, 'Legend font:')
self.cbLgdFont = wx.ComboBox(self, choices=['6','7','8','9','10','11','12','13','14','15','16','17','18'] , style=wx.CB_READONLY)
self.cbLgdFont.SetSelection(2)
lbLW = wx.StaticText( self, -1, 'Line width:')
self.cbLW = wx.ComboBox(self, choices=['0.5','1.0','1.5','2.0','2.5','3.0'] , style=wx.CB_READONLY)
self.cbLW.SetSelection(2)
lbMS = wx.StaticText( self, -1, 'Marker size:')
self.cbMS= wx.ComboBox(self, choices=['0.5','1','2','3','4','5','6','7','8'] , style=wx.CB_READONLY)
self.cbMS.SetSelection(2)
# Layout
#dummy_sizer = wx.BoxSizer(wx.HORIZONTAL)
dummy_sizer = wx.WrapSizer(orient=wx.HORIZONTAL)
dummy_sizer.Add(lbFont ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(self.cbFont ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(lbLW ,0, flag = wx.CENTER|wx.LEFT,border = 5)
dummy_sizer.Add(self.cbLW ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(lbMS ,0, flag = wx.CENTER|wx.LEFT,border = 5)
dummy_sizer.Add(self.cbMS ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(lbLegend ,0, flag = wx.CENTER|wx.LEFT,border = 5)
dummy_sizer.Add(self.cbLegend ,0, flag = wx.CENTER|wx.LEFT,border = 1)
dummy_sizer.Add(lbLgdFont ,0, flag = wx.CENTER|wx.LEFT,border = 5)
dummy_sizer.Add(self.cbLgdFont ,0, flag = wx.CENTER|wx.LEFT,border = 1)
self.SetSizer(dummy_sizer)
self.Hide()
# Callbacks
self.Bind(wx.EVT_COMBOBOX ,self.onAnyEsthOptionChange)
self.cbFont.Bind(wx.EVT_COMBOBOX ,self.onFontOptionChange)
def onAnyEsthOptionChange(self,event=None):
self.parent.redraw_same_data()
def onFontOptionChange(self,event=None):
matplotlib_rc('font', **{'size':int(self.cbFont.Value) }) # affect all (including ticks)
self.onAnyEsthOptionChange()
class PlotPanel(wx.Panel):
def __init__(self, parent, selPanel,infoPanel=None, mainframe=None):
# Superclass constructor
super(PlotPanel,self).__init__(parent)
# Font handling
font = parent.GetFont()
font.SetPointSize(font.GetPointSize()-1)
self.SetFont(font)
# Preparing a special font manager for chinese characters
self.specialFont=None
CH_F_PATHS = [
os.path.join(matplotlib.get_data_path(), 'fonts/ttf/SimHei.ttf'),
os.path.join(os.path.dirname(__file__),'../SimHei.ttf')]
for fpath in CH_F_PATHS:
if os.path.exists(fpath):
fontP = font_manager.FontProperties(fname=fpath)
fontP.set_size(font.GetPointSize())
self.specialFont=fontP
break
# data
self.selPanel = selPanel # <<< dependency with selPanel should be minimal
self.selMode = ''
self.infoPanel=infoPanel
self.infoPanel.setPlotMatrixCallbacks(self._onPlotMatrixLeftClick, self._onPlotMatrixRightClick)
self.parent = parent
self.mainframe= mainframe
self.plotData = []
self.plotDataOptions=dict()
if self.selPanel is not None:
bg=self.selPanel.BackgroundColour
self.SetBackgroundColour(bg) # somehow, our parent has the wrong color
#self.SetBackgroundColour('red')
self.leftMeasure = GUIMeasure(1, 'firebrick')
self.rightMeasure = GUIMeasure(2, 'darkgreen')
self.xlim_prev = [[0, 1]]
self.ylim_prev = [[0, 1]]
# GUI
self.fig = Figure(facecolor="white", figsize=(1, 1))
register_matplotlib_converters()
self.canvas = FigureCanvas(self, -1, self.fig)
self.canvas.mpl_connect('motion_notify_event', self.onMouseMove)
self.canvas.mpl_connect('button_press_event', self.onMouseClick)
self.canvas.mpl_connect('button_release_event', self.onMouseRelease)
self.canvas.mpl_connect('draw_event', self.onDraw)
self.clickLocation = (None, 0, 0)
self.navTBTop = MyNavigationToolbar2Wx(self.canvas, ['Home', 'Pan'])
self.navTBBottom = MyNavigationToolbar2Wx(self.canvas, ['Subplots', 'Save'])
TBAddCheckTool(self.navTBBottom,'', icons.chart.GetBitmap(), self.onEsthToggle)
self.esthToggle=False
self.navTBBottom.Realize()
#self.navTB = wx.ToolBar(self, style=wx.TB_HORIZONTAL|wx.TB_HORZ_LAYOUT|wx.TB_NODIVIDER|wx.TB_FLAT)
#self.navTB.SetMargins(0,0)
#self.navTB.SetToolPacking(0)
#self.navTB.AddCheckTool(-1, label='', bitmap1=icons.chart.GetBitmap())
#self.navTB.Realize()
self.toolbar_sizer = wx.BoxSizer(wx.VERTICAL)
self.toolbar_sizer.Add(self.navTBTop)
self.toolbar_sizer.Add(self.navTBBottom)
# --- Tool Panel
self.toolSizer= wx.BoxSizer(wx.VERTICAL)
# --- PlotType Panel
self.pltTypePanel= PlotTypePanel(self);
# --- Plot type specific options
self.spcPanel = SpectralCtrlPanel(self)
self.pdfPanel = PDFCtrlPanel(self)
self.cmpPanel = CompCtrlPanel(self)
self.mmxPanel = MinMaxPanel(self)
# --- Esthetics panel
self.esthPanel = EstheticsPanel(self)
# --- Ctrl Panel
self.ctrlPanel= wx.Panel(self)
#self.ctrlPanel.SetBackgroundColour('blue')
# Check Boxes
self.cbCurveType = wx.ComboBox(self.ctrlPanel, choices=['Plain','LS','Markers','Mix'] , style=wx.CB_READONLY)
self.cbCurveType.SetSelection(1)
self.cbSub = wx.CheckBox(self.ctrlPanel, -1, 'Subplot',(10,10))
self.cbLogX = wx.CheckBox(self.ctrlPanel, -1, 'Log-x',(10,10))
self.cbLogY = wx.CheckBox(self.ctrlPanel, -1, 'Log-y',(10,10))
self.cbSync = wx.CheckBox(self.ctrlPanel, -1, 'Sync-x',(10,10))
self.cbXHair = wx.CheckBox(self.ctrlPanel, -1, 'CrossHair',(10,10))
self.cbPlotMatrix = wx.CheckBox(self.ctrlPanel, -1, 'Matrix',(10,10))
self.cbAutoScale = wx.CheckBox(self.ctrlPanel, -1, 'AutoScale',(10,10))
self.cbGrid = wx.CheckBox(self.ctrlPanel, -1, 'Grid',(10,10))
self.cbStepPlot = wx.CheckBox(self.ctrlPanel, -1, 'StepPlot',(10,10))
self.cbMeasure = wx.CheckBox(self.ctrlPanel, -1, 'Measure',(10,10))
#self.cbSub.SetValue(True) # DEFAULT TO SUB?
self.cbSync.SetValue(True)
self.cbXHair.SetValue(True) # Have cross hair by default
self.cbAutoScale.SetValue(True)
# Callbacks
self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbSub )
self.Bind(wx.EVT_COMBOBOX, self.redraw_event , self.cbCurveType)
self.Bind(wx.EVT_CHECKBOX, self.log_select , self.cbLogX )
self.Bind(wx.EVT_CHECKBOX, self.log_select , self.cbLogY )
self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbSync )
self.Bind(wx.EVT_CHECKBOX, self.crosshair_event , self.cbXHair )
self.Bind(wx.EVT_CHECKBOX, self.plot_matrix_select, self.cbPlotMatrix )
self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbAutoScale )
self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbGrid )
self.Bind(wx.EVT_CHECKBOX, self.redraw_event , self.cbStepPlot )
self.Bind(wx.EVT_CHECKBOX, self.measure_select , self.cbMeasure )
# LAYOUT
cb_sizer = wx.FlexGridSizer(rows=4, cols=3, hgap=0, vgap=0)
cb_sizer.Add(self.cbCurveType , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbSub , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbAutoScale , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbLogX , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbLogY , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbStepPlot , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbXHair , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbGrid , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbSync , 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbPlotMatrix, 0, flag=wx.ALL, border=1)
cb_sizer.Add(self.cbMeasure , 0, flag=wx.ALL, border=1)
self.ctrlPanel.SetSizer(cb_sizer)
# --- Crosshair Panel
crossHairPanel= wx.Panel(self)
self.lbCrossHairX = wx.StaticText(crossHairPanel, -1, 'x = ... ')
self.lbCrossHairY = wx.StaticText(crossHairPanel, -1, 'y = ... ')
self.lbDeltaX = wx.StaticText(crossHairPanel, -1, ' ')
self.lbDeltaY = wx.StaticText(crossHairPanel, -1, ' ')
self.lbCrossHairX.SetFont(getMonoFont(self))
self.lbCrossHairY.SetFont(getMonoFont(self))
self.lbDeltaX.SetFont(getMonoFont(self))
self.lbDeltaY.SetFont(getMonoFont(self))
cbCH = wx.FlexGridSizer(rows=4, cols=1, hgap=0, vgap=0)
cbCH.Add(self.lbCrossHairX , 0, flag=wx.ALL, border=1)
cbCH.Add(self.lbCrossHairY , 0, flag=wx.ALL, border=1)
cbCH.Add(self.lbDeltaX , 0, flag=wx.ALL, border=1)
cbCH.Add(self.lbDeltaY , 0, flag=wx.ALL, border=1)
crossHairPanel.SetSizer(cbCH)
# --- layout of panels
row_sizer = wx.BoxSizer(wx.HORIZONTAL)
sl2 = wx.StaticLine(self, -1, size=wx.Size(1,-1), style=wx.LI_VERTICAL)
sl3 = wx.StaticLine(self, -1, size=wx.Size(1,-1), style=wx.LI_VERTICAL)
sl4 = wx.StaticLine(self, -1, size=wx.Size(1,-1), style=wx.LI_VERTICAL)
row_sizer.Add(self.pltTypePanel , 0 , flag=wx.LEFT|wx.RIGHT|wx.CENTER , border=1)
row_sizer.Add(sl2 , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0)
row_sizer.Add(self.toolbar_sizer, 0 , flag=wx.LEFT|wx.RIGHT|wx.CENTER , border=1)
row_sizer.Add(sl3 , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0)
row_sizer.Add(self.ctrlPanel , 1 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0)
row_sizer.Add(sl4 , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=0)
row_sizer.Add(crossHairPanel , 0 , flag=wx.LEFT|wx.RIGHT|wx.EXPAND|wx.CENTER, border=1)
plotsizer = wx.BoxSizer(wx.VERTICAL)
self.slCtrl = wx.StaticLine(self, -1, size=wx.Size(-1,1), style=wx.LI_HORIZONTAL)
self.slCtrl.Hide()
self.slEsth = wx.StaticLine(self, -1, size=wx.Size(-1,1), style=wx.LI_HORIZONTAL)
self.slEsth.Hide()
sl1 = wx.StaticLine(self, -1, size=wx.Size(-1,1), style=wx.LI_HORIZONTAL)
plotsizer.Add(self.toolSizer,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10)
plotsizer.Add(self.canvas ,1,flag = wx.EXPAND,border = 5 )
plotsizer.Add(sl1 ,0,flag = wx.EXPAND,border = 0)
plotsizer.Add(self.spcPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10)
plotsizer.Add(self.pdfPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10)
plotsizer.Add(self.cmpPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10)
plotsizer.Add(self.mmxPanel ,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10)
plotsizer.Add(self.slEsth ,0,flag = wx.EXPAND,border = 0)
plotsizer.Add(self.esthPanel,0,flag = wx.EXPAND|wx.CENTER|wx.TOP|wx.BOTTOM,border = 10)
plotsizer.Add(self.slCtrl ,0,flag = wx.EXPAND,border = 0)
plotsizer.Add(row_sizer ,0,flag = wx.EXPAND|wx.NORTH ,border = 2)
self.show_hide(self.spcPanel, self.pltTypePanel.cbFFT.GetValue())
self.show_hide(self.cmpPanel, self.pltTypePanel.cbCompare.GetValue())
self.show_hide(self.pdfPanel, self.pltTypePanel.cbPDF.GetValue())
self.show_hide(self.mmxPanel, self.pltTypePanel.cbMinMax.GetValue())
self.SetSizer(plotsizer)
self.plotsizer=plotsizer;
self.set_subplot_spacing(init=True)
def onEsthToggle(self,event):
self.esthToggle=not self.esthToggle
if self.esthToggle:
self.slCtrl.Show()
self.esthPanel.Show()
else:
self.slCtrl.Hide()
self.esthPanel.Hide()
self.plotsizer.Layout()
event.Skip()
def set_subplot_spacing(self, init=False):
"""
Handle default subplot spacing
NOTE:
- Tight layout fails when the ylabel is too long, especially for FFT with multiple signals
- might need to change depending on window size/resizing
- need to change if right axis needed
- this will override the user settings
"""
#self.fig.set_tight_layout(True) # NOTE: works almost fine, but problem with FFT multiple
# TODO this is definitely not generic, but tight fails..
if init:
# NOTE: at init size is (20,20) because sizer is not initialized yet
bottom = 0.12
left = 0.12
else:
if self.Size[1]<300:
bottom=0.20
elif self.Size[1]<350:
bottom=0.18
elif self.Size[1]<430:
bottom=0.16
elif self.Size[1]<600:
bottom=0.13
elif self.Size[1]<800:
bottom=0.09
else:
bottom=0.07
if self.Size[0]<300:
left=0.22
elif self.Size[0]<450:
left=0.20
elif self.Size[0]<950:
left=0.12
else:
left=0.06
#print(self.Size,'bottom', bottom, 'left',left)
if self.cbPlotMatrix.GetValue(): # TODO detect it
self.fig.subplots_adjust(top=0.97,bottom=bottom,left=left,right=0.98-left)
else:
self.fig.subplots_adjust(top=0.97,bottom=bottom,left=left,right=0.98)
def plot_matrix_select(self, event):
self.infoPanel.togglePlotMatrix(self.cbPlotMatrix.GetValue())
self.redraw_same_data()
def measure_select(self, event):
if self.cbMeasure.IsChecked():
self.cbAutoScale.SetValue(False)
self.redraw_same_data()
def redraw_event(self, event):
self.redraw_same_data()
def log_select(self, event):
if self.pltTypePanel.cbPDF.GetValue():
self.cbLogX.SetValue(False)
self.cbLogY.SetValue(False)
else:
self.redraw_same_data()
def crosshair_event(self, event):
try:
self.multiCursors.vertOn =self.cbXHair.GetValue()
self.multiCursors.horizOn=self.cbXHair.GetValue()
self.multiCursors._update()
except:
pass
def show_hide(self,panel,bShow):
if bShow:
panel.Show()
self.slEsth.Show()
else:
self.slEsth.Hide()
panel.Hide()
@property
def sharex(self):
return self.cbSync.IsChecked() and (not self.pltTypePanel.cbPDF.GetValue())
def set_subplots(self,nPlots):
self.set_subplot_spacing()
# Creating subplots
for ax in self.fig.axes:
self.fig.delaxes(ax)
sharex=None
for i in range(nPlots):
# Vertical stack
if i==0:
ax=self.fig.add_subplot(nPlots,1,i+1)
if self.sharex:
sharex=ax
else:
ax=self.fig.add_subplot(nPlots,1,i+1,sharex=sharex)
# Horizontal stack
#self.fig.add_subplot(1,nPlots,i+1)
def onMouseMove(self, event):
if event.inaxes:
x, y = event.xdata, event.ydata
self.lbCrossHairX.SetLabel('x =' + self.formatLabelValue(x))
self.lbCrossHairY.SetLabel('y =' + self.formatLabelValue(y))
def onMouseClick(self, event):
self.clickLocation = (event.inaxes, event.xdata, event.ydata)
def onMouseRelease(self, event):
if self.cbMeasure.GetValue():
for ax, ax_idx in zip(self.fig.axes, range(len(self.fig.axes))):
if event.inaxes == ax:
x, y = event.xdata, event.ydata
if self.clickLocation != (ax, x, y):
# Ignore measurements for zoom-actions. Possibly add small tolerance.
# Zoom-actions disable autoscale
self.cbAutoScale.SetValue(False)
return
if event.button == 1:
self.infoPanel.setMeasurements((x, y), None)
self.leftMeasure.set(ax_idx, x, y)
self.leftMeasure.plot(ax, ax_idx)
elif event.button == 3:
self.infoPanel.setMeasurements(None, (x, y))
self.rightMeasure.set(ax_idx, x, y)
self.rightMeasure.plot(ax, ax_idx)
else:
return
if self.cbAutoScale.IsChecked() is False:
self._restore_limits()
if self.leftMeasure.axis_idx == self.rightMeasure.axis_idx and self.leftMeasure.axis_idx != -1:
self.lbDeltaX.SetLabel('dx=' + self.formatLabelValue(self.rightMeasure.x - self.leftMeasure.x))
self.lbDeltaY.SetLabel('dy=' + self.formatLabelValue(self.rightMeasure.y - self.leftMeasure.y))
else:
self.lbDeltaX.SetLabel('')
self.lbDeltaY.SetLabel('')
return
def onDraw(self, event):
self._store_limits()
def formatLabelValue(self, value):
try:
if abs(value)<1000 and abs(value)>1e-4:
s = '{:10.5f}'.format(value)
else:
s = '{:10.3e}'.format(value)
except TypeError:
s = ' '
return s
def removeTools(self,event=None,Layout=True):
try:
self.toolPanel.destroy() # call the "destroy" function which might clean up data
except:
pass
try:
# Python3
self.toolSizer.Clear(delete_windows=True) # Delete Windows
except:
# Python2
if hasattr(self,'toolPanel'):
self.toolSizer.Remove(self.toolPanel)
self.toolPanel.Destroy()
del self.toolPanel
self.toolSizer.Clear() # Delete Windows
if Layout:
self.plotsizer.Layout()
def showTool(self,toolName=''):
from .GUITools import TOOLS
self.Freeze()
self.removeTools(Layout=False)
if toolName in TOOLS.keys():
self.toolPanel=TOOLS[toolName](self) # calling the panel constructor
else:
raise Exception('Unknown tool {}'.format(toolName))
self.toolSizer.Add(self.toolPanel, 0, wx.EXPAND|wx.ALL, 5)
self.plotsizer.Layout()
self.Thaw()
def setPD_PDF(self,PD,c):
""" Convert plot data to PDF data based on GUI options"""
# ---PDF
nBins = self.pdfPanel.scBins.GetValue()
bSmooth = self.pdfPanel.cbSmooth.GetValue()
nBins_out= PD.toPDF(nBins,bSmooth)
if nBins_out!=nBins:
self.pdfPanel.scBins.SetValue(nBins_out)
def setPD_MinMax(self,PD):
""" Convert plot data to MinMax data based on GUI options"""
yScale=self.mmxPanel.cbyMinMax.IsChecked()
xScale=self.mmxPanel.cbxMinMax.IsChecked()
try:
PD.toMinMax(xScale,yScale)
except Exception as e:
self.mmxPanel.cbxMinMax.SetValue(False)
raise e # Used to be Warn
def setPD_FFT(self,pd):
""" Convert plot data to FFT data based on GUI options"""
yType = self.spcPanel.cbType.GetStringSelection()
xType = self.spcPanel.cbTypeX.GetStringSelection()
avgMethod = self.spcPanel.cbAveraging.GetStringSelection()
avgWindow = self.spcPanel.cbAveragingMethod.GetStringSelection()
bDetrend = self.spcPanel.cbDetrend.IsChecked()
nExp = self.spcPanel.scP2.GetValue()
# Convert plotdata to FFT data
try:
Info = pd.toFFT(yType=yType, xType=xType, avgMethod=avgMethod, avgWindow=avgWindow, bDetrend=bDetrend, nExp=nExp)
# Trigger
if hasattr(Info,'nExp') and Info.nExp!=nExp:
self.spcPanel.scP2.SetValue(Info.nExp)
self.spcPanel.updateP2(Info.nExp)
except Exception as e:
self.spcPanel.Hide();
self.plotsizer.Layout()
raise e
def transformPlotData(self,PD):
""""
Apply MinMax, PDF or FFT transform to plot based on GUI data
"""
plotType=self.pltTypePanel.plotType()
if plotType=='MinMax':
self.setPD_MinMax(PD)
elif plotType=='PDF':
self.setPD_PDF(PD,PD.c)
elif plotType=='FFT':
self.setPD_FFT(PD)
def getPlotData(self,plotType):
ID,SameCol,selMode=self.selPanel.getPlotDataSelection()
self.selMode=selMode # we store the selection mode
del self.plotData
self.plotData=[]
tabs=self.selPanel.tabList.getTabs() # TODO, selPanel should just return the PlotData...
try:
for i,idx in enumerate(ID):
# Initialize each plotdata based on selected table and selected id channels
pd=PlotData();
pd.fromIDs(tabs,i,idx,SameCol, self.plotDataOptions)
# Possible change of data
if plotType=='MinMax':
self.setPD_MinMax(pd)
elif plotType=='PDF':
self.setPD_PDF(pd,pd.c)
elif plotType=='FFT':
self.setPD_FFT(pd)
self.plotData.append(pd)
except Exception as e:
self.plotData=[]
raise e
def PD_Compare(self,mode):
""" Perform comparison of the selected PlotData, returns new plotData with the comparison. """
sComp = self.cmpPanel.rbType.GetStringSelection()
try:
self.plotData = compareMultiplePD(self.plotData,mode, sComp)
except Exception as e:
self.pltTypePanel.cbRegular.SetValue(True)
raise e
def _onPlotMatrixLeftClick(self, event):
"""Toggle plot-states from None, to left-axis, to right-axis.
Left-click goes forwards, right-click goes backwards.
An IndexError from getPlotMatrix is caught to avoid "holes" in the matrix next to populated entries.
"""
btn = event.GetEventObject()
label = btn.GetLabelText()
if label == '-':
btn.SetLabel('1')
try:
self.infoPanel.getPlotMatrix(self.plotData, self.cbSub.IsChecked())
except IndexError:
btn.SetLabel('-')
elif label == '1':
btn.SetLabel('2')
else:
btn.SetLabel('-')
try:
self.infoPanel.getPlotMatrix(self.plotData, self.cbSub.IsChecked())
except IndexError:
btn.SetLabel('1')
self.redraw_same_data()
def _onPlotMatrixRightClick(self, event):
btn = event.GetEventObject()
label = btn.GetLabelText()
if label == '-':
btn.SetLabel('2')
try:
self.infoPanel.getPlotMatrix(self.plotData, self.cbSub.IsChecked())
except IndexError:
btn.SetLabel('-')
elif label == '1':
btn.SetLabel('-')
try:
self.infoPanel.getPlotMatrix(self.plotData, self.cbSub.IsChecked())
except IndexError:
btn.SetLabel('2')
else:
btn.SetLabel('1')
self.redraw_same_data()
def set_axes_lim(self, PDs, axis):
"""
It's usually faster to set the axis limits first (before plotting)
and disable autoscaling. This way the limits are not recomputed when plot data are added.
Also, we already have computed the min and max, so we leverage that.
NOTE:
does not work with strings
does not work for FFT and compare
INPUTS:
PDs: list of plot data
"""
# TODO option for tight axes
tight=False
plotType=self.pltTypePanel.plotType()
if plotType in ['FFT','Compare']:
axis.autoscale(True, axis='both', tight=tight)
return
vXString=[PDs[i].xIsString for i in axis.iPD]
vYString=[PDs[i].yIsString for i in axis.iPD]
if not any(vXString) and not self.cbLogX.IsChecked():
try:
xMin=np.min([PDs[i]._xMin[0] for i in axis.iPD])
xMax=np.max([PDs[i]._xMax[0] for i in axis.iPD])
if np.isclose(xMin,xMax):
delta=1 if np.isclose(xMax,0) else 0.1*xMax
else:
if tight:
delta=0
else:
delta = (xMax-xMin)*pyplot_rc['axes.xmargin']
axis.set_xlim(xMin-delta,xMax+delta)
axis.autoscale(False, axis='x', tight=False)
except:
pass
if not any(vYString) and not self.cbLogY.IsChecked():
try:
yMin=np.min([PDs[i]._yMin[0] for i in axis.iPD])
yMax=np.max([PDs[i]._yMax[0] for i in axis.iPD])
delta = (yMax-yMin)*pyplot_rc['axes.ymargin']
if np.isclose(yMin,yMax):
delta=1 if np.isclose(yMax,0) else 0.1*yMax
else:
if tight:
delta=0
else:
delta = (yMax-yMin)*pyplot_rc['axes.ymargin']
axis.set_ylim(yMin-delta,yMax+delta)
axis.autoscale(False, axis='y', tight=False)
except:
pass
def plot_all(self, keep_limits=True):
self.multiCursors=[]
if self.cbMeasure.GetValue() is False:
for measure in [self.leftMeasure, self.rightMeasure]:
measure.clear()
self.infoPanel.setMeasurements(None, None)
self.lbDeltaX.SetLabel('')
self.lbDeltaY.SetLabel('')
axes=self.fig.axes
PD=self.plotData
# --- Plot options
bStep = self.cbStepPlot.IsChecked()
plot_options = dict()
plot_options['lw']=float(self.esthPanel.cbLW.Value)
plot_options['ms']=float(self.esthPanel.cbMS.Value)
if self.cbCurveType.Value=='Plain':
plot_options['LineStyles'] = ['-']
plot_options['Markers'] = ['']
elif self.cbCurveType.Value=='LS':
plot_options['LineStyles'] = ['-','--','-.',':']
plot_options['Markers'] = ['']
elif self.cbCurveType.Value=='Markers':
plot_options['LineStyles'] = ['']
plot_options['Markers'] = ['o','d','v','^','s']
elif self.cbCurveType.Value=='Mix': # NOTE, can be improved
plot_options['LineStyles'] = ['-','--', '-','-','-']
plot_options['Markers'] = ['' ,'' ,'o','^','s']
else:
# Combination of linestyles markers, colors, etc.
# But at that stage, if the user really wants this, then we can implement an option to set styles per plot. Not high priority.
raise Exception('Not implemented')
# --- Font options
font_options = dict()
font_options_legd = dict()
font_options['size'] = int(self.esthPanel.cbFont.Value) # affect labels
font_options_legd['fontsize'] = int(self.esthPanel.cbLgdFont.Value)
needChineseFont = any([pd.needChineseFont for pd in PD])
if needChineseFont and self.specialFont is not None:
font_options['fontproperties']= self.specialFont
font_options_legd['prop'] = self.specialFont
# --- Loop on axes. Either use ax.iPD to choose the plot data, or rely on plotmatrix
for axis_idx, ax_left in enumerate(axes):
ax_right = None
# Checks
vDate=[PD[i].yIsDate for i in ax_left.iPD]
if any(vDate) and len(vDate)>1:
Error(self,'Cannot plot date and other value on the same axis')
return
# Set limit before plot when possible, for optimization
self.set_axes_lim(PD, ax_left)
# Actually plot
pm = self.infoPanel.getPlotMatrix(PD, self.cbSub.IsChecked())
__, bAllNegLeft = self.plotSignals(ax_left, axis_idx, PD, pm, 1, bStep, plot_options)
ax_right, bAllNegRight = self.plotSignals(ax_left, axis_idx, PD, pm, 2, bStep, plot_options)
self.infoPanel.setMeasurements(self.leftMeasure.get_xydata(), self.rightMeasure.get_xydata())
for measure in [self.leftMeasure, self.rightMeasure]:
measure.plot(ax_left, axis_idx)
# Log Axes
if self.cbLogX.IsChecked():
ax_left.set_xscale("log", nonposx='clip')
if self.cbLogY.IsChecked():
if bAllNegLeft is False:
ax_left.set_yscale("log", nonposy='clip')
if bAllNegRight is False and ax_right is not None:
ax_right.set_yscale("log", nonposy='clip')
# XLIM - TODO FFT ONLY NASTY
if self.pltTypePanel.cbFFT.GetValue():
try:
xlim=float(self.spcPanel.tMaxFreq.GetLineText(0))
if xlim>0:
ax_left.set_xlim([0,xlim])
pd=PD[ax_left.iPD[0]]
I=pd.x<xlim
ymin = np.min([np.min(PD[ipd].y[I]) for ipd in ax_left.iPD])
ax_left.set_ylim(bottom=ymin/2)
if self.spcPanel.cbTypeX.GetStringSelection()=='x':
ax_left.invert_xaxis()
except:
pass
elif self.cbAutoScale.IsChecked() is False and keep_limits:
self._restore_limits()
ax_left.grid(self.cbGrid.IsChecked())
if ax_right is not None:
l = ax_left.get_ylim()
l2 = ax_right.get_ylim()
f = lambda x : l2[0]+(x-l[0])/(l[1]-l[0])*(l2[1]-l2[0])
ticks = f(ax_left.get_yticks())
ax_right.yaxis.set_major_locator(matplotlib.ticker.FixedLocator(ticks))
if len(ax_left.lines) == 0:
ax_left.set_yticks(ax_right.get_yticks())
ax_left.yaxis.set_visible(False)
ax_right.grid(self.cbGrid.IsChecked())
# Special Grids
if self.pltTypePanel.cbCompare.GetValue():
if self.cmpPanel.rbType.GetStringSelection()=='Y-Y':
xmin,xmax=ax_left.get_xlim()
ax_left.plot([xmin,xmax],[xmin,xmax],'k--',linewidth=0.5)
# Labels
yleft_labels = []
yright_labels = []
yleft_legends = []
yright_legends = []
if pm is None:
yleft_labels = unique([PD[i].sy for i in ax_left.iPD])
if axis_idx == 0:
yleft_legends = unique([PD[i].syl for i in ax_left.iPD])
else:
for signal_idx in range(len(PD)):
if pm[signal_idx][axis_idx] == 1:
yleft_labels.append(PD[signal_idx].sy)
yleft_legends.append(PD[signal_idx].syl)
elif pm[signal_idx][axis_idx] == 2:
yright_labels.append(PD[signal_idx].sy)
yright_legends.append(PD[signal_idx].syl)
yleft_labels = unique(yleft_labels)
yright_labels = unique(yright_labels)
yleft_legends = unique(yleft_legends)
yright_legends = unique(yright_legends)
if len(yleft_labels) > 0 and len(yleft_labels) <= 3:
ax_left.set_ylabel(' and '.join(yleft_labels), **font_options)
elif ax_left is not None:
ax_left.set_ylabel('')
if len(yright_labels) > 0 and len(yright_labels) <= 3:
ax_right.set_ylabel(' and '.join(yright_labels), **font_options)
elif ax_right is not None:
ax_right.set_ylabel('')
# Legends
lgdLoc = self.esthPanel.cbLegend.Value.lower()
if (self.pltTypePanel.cbCompare.GetValue() or
((len(yleft_legends) + len(yright_legends)) > 1)):
if lgdLoc !='none':
if len(yleft_legends) > 0:
ax_left.legend(fancybox=False, loc=lgdLoc, **font_options_legd)
if ax_right is not None and len(yright_legends) > 0:
ax_right.legend(fancybox=False, loc=4, **font_options_legd)
elif len(axes)>1 and len(axes)==len(PD):
# TODO: can this be removed? If there is only one unique signal
# per subplot, normally only ylabel is displayed and no legend.
# Special case when we have subplots and all plots have the same label
if lgdLoc !='none':
usy = unique([pd.sy for pd in PD])
if len(usy)==1:
for ax in axes:
ax.legend(fancybox=False, loc=lgdLoc, **font_options_legd)
axes[-1].set_xlabel(PD[axes[-1].iPD[0]].sx, **font_options)
#print('sy :',[pd.sy for pd in PD])
#print('syl:',[pd.syl for pd in PD])
# --- Cursors for each individual plot
# NOTE: cursors need to be stored in the object!
#for ax_left in self.fig.axes:
# self.cursors.append(MyCursor(ax_left,horizOn=True, vertOn=False, useblit=True, color='gray', linewidth=0.5, linestyle=':'))
# Vertical cusor for all, commonly
bXHair = self.cbXHair.GetValue()
self.multiCursors = MyMultiCursor(self.canvas, tuple(self.fig.axes), useblit=True, horizOn=bXHair, vertOn=bXHair, color='gray', linewidth=0.5, linestyle=':')
def plotSignals(self, ax, axis_idx, PD, pm, left_right, is_step, opts):
axis = None
bAllNeg = True
if pm is None:
loop_range = ax.iPD
else:
loop_range = range(len(PD))
iPlot=-1
for signal_idx in loop_range:
do_plot = False
if left_right == 1 and (pm is None or pm[signal_idx][axis_idx] == left_right):
do_plot = True
axis = ax
elif left_right == 2 and pm is not None and pm[signal_idx][axis_idx] == left_right:
do_plot = True
if axis is None:
axis = ax.twinx()
ax.set_zorder(axis.get_zorder()+1)
ax.patch.set_visible(False)
axis._get_lines.prop_cycler = ax._get_lines.prop_cycler
pd=PD[signal_idx]
if do_plot:
iPlot+=1
# --- styling per plot
if len(pd.x)==1:
marker='o'; ls=''
else:
# TODO allow PlotData to override for "per plot" options in the future
marker = opts['Markers'][np.mod(iPlot,len(opts['Markers']))]
ls = opts['LineStyles'][np.mod(iPlot,len(opts['LineStyles']))]
if is_step:
plot = axis.step
else:
plot = axis.plot
plot(pd.x,pd.y,label=pd.syl,ms=opts['ms'], lw=opts['lw'], marker=marker, ls=ls)
try:
bAllNeg = bAllNeg and all(pd.y<=0)
except:
pass # Dates or strings
return axis, bAllNeg
def findPlotMode(self,PD):
uTabs = unique([pd.it for pd in PD])
usy = unique([pd.sy for pd in PD])
uiy = unique([pd.iy for pd in PD])
if len(uTabs)<=0:
raise Exception('No Table. Contact developer')
if len(uTabs)==1:
mode='1Tab_nCols'
else:
if PD[0].SameCol:
mode='nTabs_SameCols'
else:
# Now that we allow multiple selections detecting "simColumns" is more difficult
if len(uTabs) == len(PD):
mode='nTabs_1Col'
elif self.selMode=='simColumnsMode':
mode='nTabs_SimCols'
else:
mode='nTabs_mCols'
return mode
def findSubPlots(self,PD,mode):
uTabs = unique([pd.it for pd in PD])
usy = unique([pd.sy for pd in PD])
bSubPlots = self.cbSub.IsChecked()
bCompare = self.pltTypePanel.cbCompare.GetValue() # NOTE bCompare somehow always 1Tab_nCols
nSubPlots=1
spreadBy='none'
self.infoPanel.setTabMode(mode)
if mode=='1Tab_nCols':
if bSubPlots:
if bCompare or len(uTabs)==1:
nSubPlots = self.infoPanel.getNumberOfSubplots(PD, bSubPlots)
else:
nSubPlots=len(usy)
spreadBy='iy'
elif mode=='nTabs_SameCols':
if bSubPlots:
if bCompare:
print('>>>TODO ',mode,len(usy),len(uTabs))
else:
if len(usy)==1:
# Temporary hack until we have an option for spread by tabs or col
nSubPlots=len(uTabs)
spreadBy='it'
else:
nSubPlots=len(usy)
spreadBy='iy'
elif mode=='nTabs_SimCols':
if bSubPlots:
if bCompare:
print('>>>TODO ',mode,len(usy),len(uTabs))
else:
nSubPlots=int(len(PD)/len(uTabs))
spreadBy='mod-ip'
elif mode=='nTabs_mCols':
if bSubPlots:
if bCompare:
print('>>>TODO ',mode,len(usy),len(uTabs))
else:
if bCompare or len(uTabs)==1:
nSubPlots = self.infoPanel.getNumberOfSubplots(PD, bSubPlots)
else:
nSubPlots=len(PD)
spreadBy='mod-ip'
elif mode=='nTabs_1Col':
if bSubPlots:
if bCompare:
print('>>> TODO',mode,len(uTabs))
else:
nSubPlots=len(uTabs)
spreadBy='it'
else:
raise Exception('Unknown mode, contact developer.')
return nSubPlots,spreadBy
def distributePlots(self,mode,nSubPlots,spreadBy):
""" Assigns plot data to axes and axes to plot data """
axes=self.fig.axes
# Link plot data to axes
if nSubPlots==1 or spreadBy=='none':
axes[0].iPD=[i for i in range(len(self.plotData))]
else:
for ax in axes:
ax.iPD=[]
PD=self.plotData
uTabs=unique([pd.it for pd in PD])
uiy=unique([pd.iy for pd in PD])
if spreadBy=='iy':
for ipd,pd in enumerate(PD):
i=uiy.index(pd.iy)
if i < len(axes):
axes[i].iPD.append(ipd)
elif spreadBy=='it':
for ipd,pd in enumerate(PD):
i=uTabs.index(pd.it)
axes[i].iPD.append(ipd)
elif spreadBy=='mod-ip':
for ipd,pd in enumerate(PD):
i=np.mod(ipd, nSubPlots)
axes[i].iPD.append(ipd)
else:
raise Exception('Wrong spreadby value')
def setLegendLabels(self,mode):
""" Set labels for legend """
if mode=='1Tab_nCols':
for pd in self.plotData:
if self.pltTypePanel.cbMinMax.GetValue():
pd.syl = no_unit(pd.sy)
else:
pd.syl = pd.sy
elif mode=='nTabs_SameCols':
for pd in self.plotData:
pd.syl=pd.st
elif mode=='nTabs_1Col':
usy=unique([pd.sy for pd in self.plotData])
if len(usy)==1:
for pd in self.plotData:
pd.syl=pd.st
else:
for pd in self.plotData:
if self.pltTypePanel.cbMinMax.GetValue():
pd.syl=no_unit(pd.sy)
else:
pd.syl=pd.sy #pd.syl=pd.st + ' - '+pd.sy
elif mode=='nTabs_SimCols':
bSubPlots = self.cbSub.IsChecked()
if bSubPlots: # spread by table name
for pd in self.plotData:
pd.syl=pd.st
else:
for pd in self.plotData:
pd.syl=pd.st + ' - '+pd.sy
elif mode=='nTabs_mCols':
usy=unique([pd.sy for pd in self.plotData])
bSubPlots = self.cbSub.IsChecked()
if bSubPlots and len(usy)==1: # spread by table name
for pd in self.plotData:
pd.syl=pd.st
else:
for pd in self.plotData:
pd.syl=pd.st + ' - '+pd.sy
else:
raise Exception('Unknown mode {}'.format(mode))
def empty(self):
self.cleanPlot()
def clean_memory(self):
if hasattr(self,'plotData'):
del self.plotData
self.plotData=[]
for ax in self.fig.axes:
ax.iPD=[]
self.fig.delaxes(ax)
gc.collect()
def clean_memory_plot(self):
pass
def cleanPlot(self):
for ax in self.fig.axes:
if hasattr(ax,'iPD'):
del ax.iPD
self.fig.delaxes(ax)
gc.collect()
self.fig.add_subplot(111)
ax = self.fig.axes[0]
ax.set_axis_off()
#ax.plot(1,1)
self.canvas.draw()
gc.collect()
def load_and_draw(self):
""" Full draw event:
- Get plot data based on selection
- Plot them
- Trigger changes to infoPanel
"""
self.clean_memory()
self.getPlotData(self.pltTypePanel.plotType())
if len(self.plotData)==0:
self.cleanPlot();
return
mode=self.findPlotMode(self.plotData)
if self.pltTypePanel.cbCompare.GetValue():
self.PD_Compare(mode)
if len(self.plotData)==0:
self.cleanPlot();
return
self.redraw_same_data()
if self.infoPanel is not None:
self.infoPanel.showStats(self.plotData,self.pltTypePanel.plotType())
def redraw_same_data(self, keep_limits=True):
if len(self.plotData)==0:
self.cleanPlot();
return
elif len(self.plotData) == 1:
if self.plotData[0].xIsString or self.plotData[0].yIsString or self.plotData[0].xIsDate or self.plotData[0].yIsDate:
self.cbAutoScale.SetValue(True)
else:
if len(self.xlim_prev)==0: # Might occur if some date didn't plot before (e.g. strings)
self.cbAutoScale.SetValue(True)
elif rectangleOverlap(self.plotData[0]._xMin[0], self.plotData[0]._yMin[0],
self.plotData[0]._xMax[0], self.plotData[0]._yMax[0],
self.xlim_prev[0][0], self.ylim_prev[0][0],
self.xlim_prev[0][1], self.ylim_prev[0][1]):
pass
else:
self.cbAutoScale.SetValue(True)
mode=self.findPlotMode(self.plotData)
nPlots,spreadBy=self.findSubPlots(self.plotData,mode)
self.clean_memory_plot()
self.set_subplots(nPlots)
self.distributePlots(mode,nPlots,spreadBy)
if not self.pltTypePanel.cbCompare.GetValue():
self.setLegendLabels(mode)
self.plot_all(keep_limits)
self.canvas.draw()
def _store_limits(self):
self.xlim_prev = []
self.ylim_prev = []
for ax in self.fig.axes:
self.xlim_prev.append(ax.get_xlim())
self.ylim_prev.append(ax.get_ylim())
def _restore_limits(self):
for ax, xlim, ylim in zip(self.fig.axes, self.xlim_prev, self.ylim_prev):
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if __name__ == '__main__':
import pandas as pd;
from Tables import Table,TableList
app = wx.App(False)
self=wx.Frame(None,-1,"Title")
self.SetSize((800, 600))
#self.SetBackgroundColour('red')
class FakeSelPanel(wx.Panel):
def __init__(self, parent):
super(FakeSelPanel,self).__init__(parent)
d ={'ColA': | np.linspace(0,1,100) | numpy.linspace |
import argparse
import torch
import torch.nn as nn
from torchsummary import summary
import torch.nn.functional as F
import os
import utils
import audio_recognize as ar
import audio_process as ap
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from scipy.spatial import distance
from scipy.special import erf, erfc
config_data = utils.load_config("config.json")
ST_WIN = config_data['params']['ST_WIN']
ST_STEP = config_data['params']['ST_STEP']
MIN_VOC_DUR = config_data['params']['MIN_VOC_DUR']
F1 = config_data['params']['F1']
F2 = config_data['params']['F2']
thres = config_data['params']['thres']
factor = config_data['params']['factor']
gamma_1 = config_data['params']['gamma_1']
gamma_2 = config_data['params']['gamma_2']
gamma_3 = config_data['params']['gamma_3']
model_name = config_data['params']['model']
num_ann_per_batch = config_data['params']['num_ann_per_batch']
class ConvAutoencoder(nn.Module):
def __init__(self, n_clusters = 5, kmeans_centers=None):
super(ConvAutoencoder, self).__init__()
## encoder layers ##
        # conv layer (depth from 1 --> 64), 3x3 kernels
self.conv1 = nn.Conv2d(1, 64, 3, padding =1)
# conv layer (depth from 64 --> 32), 3x3 kernels
self.conv2 = nn.Conv2d(64, 32, 3, padding=1)
# conv layer (depth from 32 --> 8), 3x3 kernels
self.conv3 = nn.Conv2d(32, 8, 3, padding=1)
self.pool = nn.MaxPool2d((2,2), 2)
self.flatten = nn.Flatten()
## decoder layers ##
# a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.t_conv1 = nn.ConvTranspose2d(8, 32, 2, stride=2)
self.t_conv2 = nn.ConvTranspose2d(32, 64, 2, stride=2)
self.t_conv3 = nn.ConvTranspose2d(64, 1, 2, stride=2)
def forward(self, x, decode = True, clustering = False, kmeans_centers=None):
## encode ##
# add hidden layers with relu activation function
# and maxpooling after
x = F.relu(self.conv1(x))
x = self.pool(x)
# add second hidden layer
x = F.relu(self.conv2(x))
x = self.pool(x)
# add third hidden layer
x = F.relu(self.conv3(x))
x = self.pool(x) # compressed representation
if self.training and decode:
## decode ##
# add transpose conv layers, with relu activation function
y = F.relu(self.t_conv1(x))
y = F.relu(self.t_conv2(y))
# output layer (with sigmoid for scaling from 0 to 1)
            y = torch.sigmoid(self.t_conv3(y))
if not clustering:
return x, y
if self.training and clustering:
x = self.flatten(x)
dist =torch.cdist(x, kmeans_centers)
            sim = (1 + dist.pow(2)).pow(-1)
            q = sim / torch.sum(sim, dim=1, keepdim=True)
if decode:
return x, y, q
else:
return x, q
return x
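# Illustrative usage sketch (an addition, not part of the original script): shows the
# tensor shapes ConvAutoencoder produces in its different modes. The 64x64 patch size
# and the 5 cluster centers are assumptions chosen so three 2x2 poolings divide evenly.
def _example_autoencoder_forward():
    model = ConvAutoencoder()
    spec = torch.rand(4, 1, 64, 64)  # batch of 4 single-channel spectrogram patches
    model.train()
    encoded, decoded = model(spec)  # decode=True, clustering=False -> (4, 8, 8, 8), (4, 1, 64, 64)
    centers = torch.rand(5, 8 * 8 * 8)  # hypothetical k-means centers, one per cluster
    flat, recon, q = model(spec, decode=True, clustering=True, kmeans_centers=centers)
    # q: (4, 5) soft cluster assignments; each row sums to 1
    model.eval()
    features = model(spec)  # eval mode returns only the encoded representation
    return encoded.shape, decoded.shape, q.shape, features.shape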
def parse_arguments():
"""Parse arguments for real time demo.
"""
parser = argparse.ArgumentParser(description="Amvoc")
parser.add_argument("-i", "--data_folder", required=True, nargs=None,
help="Folder")
parser.add_argument("-ne", "--num_of_epochs", required=True, nargs=None,
help="Parameter")
parser.add_argument("-s", "--save_model", required=False, nargs=None,
help="Condition")
return parser.parse_args()
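# Illustrative invocation of the options above (an addition; the script and folder names
# are placeholders, not taken from the original project):
#   python train.py -i ./vocalization_data -ne 100 -s True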
def load_spectrograms (filename):
train_data = []
print(filename)
spectrogram, sp_time, sp_freq, fs = ap.get_spectrogram(filename,
ST_WIN, ST_STEP)
# These should change depending on the signal's size
spec_resize_ratio_freq = 4
spec_resize_ratio_time = 4
f_low = F1 if F1 < fs / 2.0 else fs / 2.0
f_high = F2 if F2 < fs / 2.0 else fs / 2.0
# define feature sequence for vocalization detection
f1 = np.argmin( | np.abs(sp_freq - f_low) | numpy.abs |
import os
import sys
from collections import namedtuple
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg
as FigureCanvas)
from matplotlib.figure import Figure
from PyQt5 import QtCore, QtGui, QtWidgets
import pandas as pd
import numba
from sklearn.cluster import DBSCAN
from skimage import io, color
TableParams = namedtuple('TableParams',
['image_shape', 'pixel_scale', 'invert_y'])
DEFAULTPARAMS = TableParams(image_shape=(2048, 2048),
pixel_scale=10,
invert_y=True)
def find_header_line(filename):
with open(filename) as fin:
for i, line in enumerate(fin):
if line.rstrip() == '##':
return (i - 1)
return None
def read_locations_table(filename):
header_line = find_header_line(filename)
skiprows = list(range(header_line)) + [header_line + 1]
table = pd.read_csv(filename, skiprows=skiprows, delimiter='\t')
return table
def image_coords(location_table, params=DEFAULTPARAMS):
image_shape = params.image_shape
scale = params.pixel_scale
xs, ys = location_table[['X_COORD', 'Y_COORD']].values.T
rows, cols = ys / scale, xs / scale
if params.invert_y:
rows = image_shape[0] - rows
return rows, cols
def image_coords_indices(location_table, params=DEFAULTPARAMS):
rows, cols = image_coords(location_table, params)
rrows, rcols = np.round(rows).astype(int), np.round(cols).astype(int)
filter_rows = (0 <= rrows) & (rrows < params.image_shape[0])
filter_cols = (0 <= rcols) & (rcols < params.image_shape[1])
filter_all = filter_cols & filter_rows
return rrows[filter_all], rcols[filter_all], filter_all
@numba.njit
def _fill_image(image, rows, cols):
for i, j in zip(rows, cols):
image[i, j] += 1
def _stretchlim(image, bottom=0.001, top=None, in_place=True):
"""Stretch the image so new image range corresponds to given quantiles.
Parameters
----------
image : array, shape (M, N, [...,] P)
The input image.
bottom : float, optional
The lower quantile.
top : float, optional
The upper quantile. If not provided, it is set to 1 - `bottom`.
in_place : bool, optional
If True, modify the input image in-place (only possible if
it is a float image).
Returns
-------
out : np.ndarray of float
The stretched image.
"""
    if in_place and np.issubdtype(image.dtype, np.floating):
out = image
else:
out = np.empty(image.shape, np.float32)
out[:] = image
if top is None:
top = 1 - bottom
q0, q1 = np.percentile(image, [100*bottom, 100*top])
out -= q0
if q1 > q0:
out /= q1 - q0
out = np.clip(out, 0, 1, out=out)
return out
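# Illustrative example (an addition, not part of the original module): a minimal sketch
# of the quantile stretch performed by _stretchlim on a synthetic count image.
def _example_stretchlim():
    rng = np.random.default_rng(0)
    img = rng.poisson(5, size=(64, 64)).astype(np.float32)  # synthetic localization counts
    stretched = _stretchlim(img, bottom=0.01)  # clip to the 1%-99% quantile range
    return stretched.min(), stretched.max()  # approximately 0.0 and 1.0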
def image_from_table(location_table, params=DEFAULTPARAMS, stretch=0.001):
rows, cols, _ = image_coords_indices(location_table)
image = np.zeros(params.image_shape, dtype=float)
_fill_image(image, rows, cols)
image = _stretchlim(image, stretch)
return image
def select_roi(image, rois=None, ax=None, axim=None, qtapp=None):
"""Return a label image based on polygon selections made with the mouse.
Parameters
----------
image : (M, N[, 3]) array
Grayscale or RGB image.
rois : list, optional
If given, append ROIs to this existing list. Otherwise a new list
object will be created.
ax : matplotlib Axes, optional
The Axes on which to do the plotting.
axim : matplotlib AxesImage, optional
An existing AxesImage on which to show the image.
qtapp : QtApplication
The main Qt application for ROI selection. If given, the ROIs will
be inserted at the right location for the image index.
Returns
-------
rois : list of tuple of ints
The selected regions, in the form
[[(row_start, row_end), (col_start, col_end)]].
Notes
-----
Use left click to select the vertices of the polygon
and right click to confirm the selection once all vertices are selected.
Examples
--------
>>> from skimage import data, future, io
>>> camera = data.camera()
>>> mask = future.manual_polygon_segmentation(camera) # doctest: +SKIP
>>> io.imshow(mask) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
if image.ndim not in (2, 3):
raise ValueError('Only 2D grayscale or RGB images are supported.')
if ax is None and axim is None:
fig, ax = plt.subplots()
if axim is None:
ax.clear()
axim = ax.imshow(image, cmap="magma")
ax.set_axis_off()
else:
axim.set_array(image)
rois = rois or []
def toggle_selector(event):
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
toggle_selector.RS.set_active(True)
def onselect(eclick, erelease):
starts = round(eclick.ydata), round(eclick.xdata)
ends = round(erelease.ydata), round(erelease.xdata)
slices = tuple((int(s), int(e)) for s, e in zip(starts, ends))
if qtapp is None:
rois.append(slices)
else:
index = qtapp.image_index
rois[index] = slices
qtapp.rectangle_selector.set_active(False)
qtapp.select_next_image()
qtapp.rectangle_selector.set_active(True)
selector = RectangleSelector(ax, onselect, useblit=True)
if qtapp is None:
# Ensure that the widget remains active by creating a reference to it.
# There's probably a better place to put that reference but this will do
# for now. (From the matplotlib RectangleSelector gallery example.)
toggle_selector.RS = selector
else:
qtapp.rectangle_selector = selector
ax.figure.canvas.mpl_connect('key_press_event', toggle_selector)
selector.set_active(True)
return rois
def _in_range(arr, tup):
low, high = tup
return (low <= arr) & (arr < high)
def cluster(coordinates, radius, core_size):
scan = DBSCAN(eps=radius, min_samples=core_size).fit(coordinates)
return scan
def _centroids(labels, sizes, coords):
clustered = labels > -1
labels, coords = labels[clustered], coords[clustered]
grouping = np.argsort(labels)
sums = np.add.reduceat(coords[grouping], np.cumsum(sizes)[:-1])
means = sums / sizes[1:, np.newaxis]
return means
def analyse_clustering(scan):
labels = scan.labels_
unclustered = labels == -1
num_unclustered = np.sum(unclustered)
cluster_sizes = np.bincount(labels[~unclustered])
counts, bin_edges = np.histogram(cluster_sizes, bins='auto')
histogram = np.convolve(bin_edges, [0.5, 0.5], 'valid'), counts
print(f'There are {len(cluster_sizes)} clusters, and {num_unclustered} '
f'outlier points, out of {labels.size}. The largest cluster size is '
f'{np.max(cluster_sizes)} and the median is '
f'{np.median(cluster_sizes)}')
return labels, cluster_sizes, histogram
def bbox_diameter(coords):
return np.hypot(*np.max(coords, axis=0) - np.min(coords, axis=0))
def summarise_clustering(scan, coords):
labels, sizes, hist = analyse_clustering(scan)
diameters = labeled_comprehension(coords, scan.labels_,
bbox_diameter)[1:, np.newaxis]
cluster_centroids = _centroids(labels, sizes, coords)
cluster_sq_centroids = _centroids(labels, sizes, coords ** 2)
centroid_vars = np.sqrt(cluster_sq_centroids - cluster_centroids ** 2)
column_names = ['centroid row', 'centroid column',
'std dev row', 'std dev column',
'detections', 'cluster id', 'diameter']
idxs = np.arange(1, np.max(labels) + 1)
columns = (cluster_centroids, centroid_vars,
sizes[1:, np.newaxis], idxs[:, np.newaxis], diameters)
print([c.shape for c in columns])
data = np.concatenate(columns, axis=1)
df = pd.DataFrame(data=data, columns=column_names)
return df
def labeled_comprehension(features, labels, function, *args,
extra_args=(), extra_kwargs={}, **kwargs):
"""Like ndi.labeled_comprehension, but features can be higher D."""
nonneg = labels >= 0
features = features[nonneg]
labels = labels[nonneg]
sorter = | np.argsort(labels) | numpy.argsort |
from OpticalElement import Optical_element
from SurfaceConic import SurfaceConic
from Beam import Beam
from Vector import Vector
import matplotlib.pyplot as plt
import numpy as np
import Shadow
from Shape import BoundaryRectangle
from CompoundOpticalElement import CompoundOpticalElement
main = "__main4__"
axis = 'z'
both = None
plot_dim = 0
if both == True:
axis = 'z'
axis1 = 'x'
def shadow_source():
iwrite = 0
beam = Shadow.Beam()
oe0 = Shadow.Source()
oe0.FDISTR = 1
oe0.FSOUR = 1
oe0.F_PHOT = 0
oe0.HDIV1 = 0.0
oe0.HDIV2 = 0.0
oe0.IDO_VX = 0
oe0.IDO_VZ = 0
oe0.IDO_X_S = 0
oe0.IDO_Y_S = 0
oe0.IDO_Z_S = 0
oe0.PH1 = 1000.0
oe0.VDIV1 = 0.0
oe0.VDIV2 = 0.0
if iwrite:
oe0.write("start.00")
beam.genSource(oe0)
if iwrite:
oe0.write("end.00")
beam.write("begin.dat")
return beam
def time_comparision(beam1, elements, oe):
origin = np.ones(beam1.N)
tf = 1e35 * np.ones(beam1.N)
for i in range (0, len(elements)):
beam = beam1.duplicate()
[beam, t] = oe[i].intersection_with_optical_element(beam)
indices = np.where(beam.flag < 0)
t[indices] = 1e30
print(indices)
tf = np.minimum(t, tf)
indices = np.where(t == tf)
origin[indices] = elements[i]
return origin
if main == "__main__":
varx = np.zeros(100)
varz = np.zeros(100)
qqq = np.zeros(100)
#for i in range (0, 1):
beam = Beam(25000)
beam.set_circular_spot(1e-3)
beam.set_divergences_collimated()
shadow_beam = shadow_source()
beam = Beam()
beam.initialize_from_arrays(
shadow_beam.getshonecol(1),
shadow_beam.getshonecol(2),
shadow_beam.getshonecol(3),
shadow_beam.getshonecol(4),
shadow_beam.getshonecol(5),
shadow_beam.getshonecol(6),
shadow_beam.getshonecol(10),
0
)
beam.flag *= 0
p = 5.
q = 15.
theta = 88. * np.pi / 180
beta = 90.* np.pi / 180
alpha = 87. * np.pi / 180
xmax = 0.
xmin = -0.4
ymax = 0.4
ymin = -0.4
zmax = 0.4
zmin = 0.
oe1 = Optical_element.initialize_as_surface_conic_paraboloid_from_focal_distances(p=p, q=q, theta=theta, alpha=0., infinity_location="p", focal=q, cylindrical=1)
bound1 = BoundaryRectangle(xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin, zmax=zmax, zmin=zmin)
oe1.bound = bound1
ccc1 = oe1.ccc_object.get_coefficients()
c1 = ccc1[1].copy()
c2 = ccc1[2].copy()
c4 = ccc1[4].copy()
c8 = ccc1[8].copy()
####### rotation of the oe around y #############################################################################
a = np.cos(beta)
b = np.sin(beta)
ccc2 = np.array([c2 * b ** 2, c1, c2 * a ** 2, -c4 * b, c4 * a, -2 * c2 * a * b, -c8 * b, 0., c8 * a, 0.])
oe2 = Optical_element.initialize_as_surface_conic_from_coefficients(ccc2)
oe2.p = p
oe2.q = q
oe2.theta = theta
oe2.alpha = 0.
oe2.type = "Surface conical mirror"
bound2 = BoundaryRectangle(xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin, zmax=zmax, zmin=zmin)
oe2.bound = bound2
ccc = np.array([0., 0., 0., 0., 0., 0., 0., 1., 0., -q])
screen = Optical_element.initialize_as_surface_conic_from_coefficients(ccc)
screen.set_parameters(p, q, 0., 0., "Surface conical mirror")
###########################################################################################################################
print(beam.y)
theta = np.pi/2 - theta
vector = Vector(0., 1., 0.)
vector.rotation(-theta, 'x')
print(vector.info())
print("theta' = %f, theta'' = %f" %(np.arctan(vector.x/vector.y)*180/np.pi, np.arctan(vector.z/vector.y)*180/np.pi))
ny = -vector.z/np.sqrt(vector.y**2+vector.z**2)
nz = vector.y/np.sqrt(vector.y**2+vector.z**2)
n = Vector(0, ny, nz)
vrot = vector.rodrigues_formula(n, -theta)
vrot.normalization()
print("theta' = %f, theta'' = %f" %(np.arctan(vrot.x/vrot.y)*180/np.pi, np.arctan(vrot.z/vrot.y)*180/np.pi))
print(vrot.info())
#########################################################################################################################
position = Vector(beam.x, beam.y, beam.z)
mod_position = position.modulus()
velocity = Vector(beam.vx, beam.vy, beam.vz)
position.rotation(-theta, 'x')
velocity.rotation(-theta, 'x')
position = position.rodrigues_formula(n, -theta)
velocity = velocity.rodrigues_formula(n, -theta)
velocity.normalization()
position.normalization()
position.x = position.x * mod_position
position.y = position.y * mod_position
position.z = position.z * mod_position
[beam.x, beam.y, beam.z] = [position.x, position.y, position.z]
[beam.vx, beam.vy, beam.vz] = [velocity.x, velocity.y, velocity.z]
####### translation ###################################################################################################
vector_point = Vector(0, p, 0)
vector_point.rotation(-(np.pi / 2 - oe2.theta - 0*np.pi/4), "x")
vector_point = vector_point.rodrigues_formula(n, -theta)
vector_point.normalization()
beam.x = beam.x - vector_point.x * p
beam.y = beam.y - vector_point.y * p
beam.z = beam.z - vector_point.z * p
########################################################################################################################
print(beam.y)
element = [1, 2, 3]
oe = [oe1, oe2, screen]
origin = time_comparision(beam, element, oe)
print(origin)
indices = np.where(origin==1)
beam1 = beam.part_of_beam(indices)
indices = np.where(origin==2)
beam2 = beam.part_of_beam(indices)
indices = np.where(origin==3)
beam3 = beam.part_of_beam(indices)
print(beam1.N, beam2.N, beam3.N)
[beam3, t] = screen.intersection_with_optical_element(beam3)
beam1_list = [beam1.duplicate(), Beam(), Beam()]
beam2_list = [beam2.duplicate(), Beam(), Beam()]
beam3_list = [beam3.duplicate(), Beam(), Beam()]
for i in range (0, 2):
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Iteration number %d" %i)
print(beam1_list[i].N, beam2_list[i].N)
[beam1_list[i], t] = oe1.intersection_with_optical_element(beam1_list[i])
oe1.output_direction_from_optical_element(beam1_list[i])
origin = time_comparision(beam1_list[i], [2, 3], [oe2, screen])
indices = np.where(origin==2)
beam2_list[i+1] = beam1_list[i].part_of_beam(indices)
indices = np.where(origin==3)
beam03 = beam1_list[i].part_of_beam(indices)
[beam2_list[i], t] = oe2.intersection_with_optical_element(beam2_list[i])
oe2.output_direction_from_optical_element(beam2_list[i])
origin = time_comparision(beam2_list[i], [1, 3], [oe1, screen])
indices = np.where(origin == 1)
beam1_list[i+1] = beam2_list[i].part_of_beam(indices)
indices = np.where(origin == 3)
beam003 = beam2_list[i].part_of_beam(indices)
beam3_list[i+1] = beam03.merge(beam003)
[beam3_list[i+1], t] = screen.intersection_with_optical_element(beam3_list[i+1])
plt.figure()
plt.plot(beam3_list[0].x, beam3_list[0].z, 'ro')
plt.plot(beam3_list[1].x, beam3_list[1].z, 'bo')
plt.plot(beam3_list[2].x, beam3_list[2].z, 'go')
plt.xlabel('x axis')
plt.ylabel('z axis')
plt.axis('equal')
plt.show()
if main == "__main2__":
shadow_beam = shadow_source()
beam = Beam()
beam.initialize_from_arrays(
shadow_beam.getshonecol(1),
shadow_beam.getshonecol(2),
shadow_beam.getshonecol(3),
shadow_beam.getshonecol(4),
shadow_beam.getshonecol(5),
shadow_beam.getshonecol(6),
shadow_beam.getshonecol(10),
0
)
beam = Beam(25000)
beam.set_flat_divergence(25*1e-6, 25*1e-6)
beam.set_rectangular_spot(xmax=25*1e-6, xmin=-25*1e-6, zmax=5*1e-6, zmin=-5*1e-6)
beam.set_gaussian_divergence(25*1e-6, 25*1e-6)
beam.set_divergences_collimated()
beam.flag *= 0
p = 5.
q = 15.
theta = 88. * np.pi /180.
xmax = 0.
xmin = -0.3
ymax = 0.3
ymin = -0.3
zmax = 0.3
zmin = 0.
bound1 = BoundaryRectangle(xmax, xmin, ymax, ymin, zmax, zmin)
bound2 = BoundaryRectangle(xmax, xmin, ymax, ymin, zmax, zmin)
Nn = 25
qq = np.ones(Nn)
dx = np.ones(Nn)
for i in range (0, Nn):
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Iteration %d" %i)
beam1 = beam.duplicate()
qq[i] = q - 1. + 2. * i / Nn
print(qq[i])
montel = CompoundOpticalElement.initialize_as_montel_parabolic(p=p, q=q, theta=theta, bound1=bound1, bound2=bound2, distance_of_the_screen=qq[i])
beam03 = montel.trace_montel(beam1)
if beam03[2].N != 0:
dx[i] = max(beam03[2].x) - min(beam03[2].x)
else:
dx[i] = 100
plt.figure()
plt.plot(qq, dx)
min_of_qq = min(dx)
indice = | np.where(dx == min_of_qq) | numpy.where |
import unittest
import shutil
import tempfile
import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import coremltools
from os.path import dirname
from tensorflow.python.tools.freeze_graph import freeze_graph
import tfcoreml as tf_converter
"""IMPORTANT NOTE TO ADD NEW TESTS:
For each test function you should set up your own graph and session.
Otherwise TF will carry all ops and tensors from previously run tests.
"""
def _tf_transpose(x, is_sequence=False):
if not hasattr(x, "shape"):
return x
if len(x.shape) == 4:
# [Batch, Height, Width, Channels] --> [Batch, Channels, Height, Width]
x = np.transpose(x, [0,3,1,2])
return np.expand_dims(x, axis=0)
elif len(x.shape) == 3:
# We only deal with non-recurrent networks for now
# [Batch, (Sequence) Length, Channels] --> [1,B, Channels, 1, Seq]
# [0,1,2] [0,2,1]
return np.transpose(x, [0,2,1])[None,:,:,None,:]
elif len(x.shape) == 2:
if is_sequence: # (N,S) --> (S,N,1,)
return x.reshape(x.shape[::-1] + (1,))
    else: # (N,C) --> (1,N,C)
return x.reshape((1, ) + x.shape) # Dense
elif len(x.shape) == 1:
    if is_sequence: # (S) --> (S,1,1)
return x.reshape((x.shape[0], 1, 1))
else:
return x
else:
return x
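# Illustrative check (an addition): the axis mapping _tf_transpose applies before
# comparing TensorFlow outputs against Core ML outputs.
def _example_tf_transpose_shapes():
  nhwc = np.zeros((1, 28, 28, 3))  # TF layout: [Batch, Height, Width, Channels]
  out = _tf_transpose(nhwc)  # -> [Seq=1, Batch, Channels, Height, Width]
  dense = _tf_transpose(np.zeros((1, 10)))  # dense activations: (N, C) -> (1, N, C)
  return out.shape, dense.shape  # (1, 1, 3, 28, 28) and (1, 1, 10)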
def _convert_to_coreml(tf_model_path, mlmodel_path, input_name_shape_dict,
output_names):
""" Convert and return the coreml model from the Tensorflow
"""
model = tf_converter.convert(tf_model_path=tf_model_path,
mlmodel_path=mlmodel_path,
output_feature_names=output_names,
input_name_shape_dict=input_name_shape_dict)
return model
def _generate_data(input_shape, mode = 'random'):
"""
Generate some random data according to a shape.
"""
if input_shape is None or len(input_shape) == 0:
return 0.5
if mode == 'zeros':
X = np.zeros(input_shape)
elif mode == 'ones':
X = np.ones(input_shape)
elif mode == 'linear':
X = np.array(range(np.product(input_shape))).reshape(input_shape)*1.0
elif mode == 'random':
X = np.random.rand(*input_shape)
elif mode == 'random_zero_mean':
X = np.random.rand(*input_shape)-0.5
return X
class TFNetworkTest(unittest.TestCase):
@classmethod
def setUpClass(self):
""" Set up the unit test by loading common utilities.
"""
def _simple_freeze(self, input_graph, input_checkpoint, output_graph,
output_node_names):
# output_node_names is a string of names separated by comma
freeze_graph(input_graph=input_graph,
input_saver="",
input_binary=False,
input_checkpoint=input_checkpoint,
output_node_names=output_node_names,
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
output_graph=output_graph,
clear_devices=True,
initializer_nodes="")
  def _test_coreml_conversion(self, model_dir, frozen_model_file, coreml_model_file,
output_node_names, input_tensor_shapes, one_dim_seq_flags,
feed_dict, tf_result, delta, use_cpu_only):
# convert the tensorflow model
output_tensor_names = [name + ':0' for name in output_node_names]
coreml_model = _convert_to_coreml(
tf_model_path=frozen_model_file,
mlmodel_path=coreml_model_file,
input_name_shape_dict=input_tensor_shapes,
output_names=output_tensor_names)
# evaluate coreml
coreml_inputs = {}
for idx, in_tensor_name in enumerate(input_tensor_shapes):
in_shape = input_tensor_shapes[in_tensor_name]
coreml_in_name = in_tensor_name.replace(':', '__').replace('/', '__')
if one_dim_seq_flags is None:
coreml_inputs[coreml_in_name] = _tf_transpose(
feed_dict[in_tensor_name]).copy()
else:
coreml_inputs[coreml_in_name] = _tf_transpose(
feed_dict[in_tensor_name], one_dim_seq_flags[idx]).copy()
coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=use_cpu_only)
for idx, out_name in enumerate(output_node_names):
tp = _tf_transpose(tf_result[idx]).flatten()
out_tensor_name = out_name.replace('/','__') + '__0'
cp = coreml_output[out_tensor_name].flatten()
      self.assertEqual(len(tp), len(cp))
for i in range(len(tp)):
max_den = max(1.0, tp[i], cp[i])
        self.assertAlmostEqual(tp[i]/max_den, cp[i]/max_den, delta=delta)
# Cleanup files - models on disk no longer useful
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def _test_tf_model(self, graph, input_tensor_shapes, output_node_names,
data_mode = 'random', delta = 1e-2, use_cpu_only = False,
one_dim_seq_flags = None):
""" Common entry to testing routine.
graph - defined TensorFlow graph.
input_tensor_shapes - dict str:shape for each input (placeholder)
output_node_names - output_node_names, a list of strings
output_tensor_names - output tensor names, a list of strings, usually
just output_node_names each appended with ':0'
"""
# Some file processing
model_dir = tempfile.mkdtemp()
graph_def_file = os.path.join(model_dir, 'tf_graph.pbtxt')
checkpoint_file = os.path.join(model_dir, 'tf_model.ckpt')
frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
# add a saver
tf.reset_default_graph()
with graph.as_default() as g:
saver = tf.train.Saver()
with tf.Session(graph = graph) as sess:
# initialize
sess.run(tf.global_variables_initializer())
# prepare the tensorflow inputs
feed_dict = {}
for in_tensor_name in input_tensor_shapes:
in_tensor_shape = input_tensor_shapes[in_tensor_name]
feed_dict[in_tensor_name] = _generate_data(in_tensor_shape, data_mode)
# run the result
fetches = [graph.get_operation_by_name(name).outputs[0] for name in \
output_node_names]
tf_result = sess.run(fetches, feed_dict=feed_dict)
# save graph definition somewhere
tf.train.write_graph(sess.graph, model_dir, graph_def_file)
# save the weights
saver.save(sess, checkpoint_file)
# freeze the graph
self._simple_freeze(
input_graph=graph_def_file,
input_checkpoint=checkpoint_file,
output_graph=frozen_model_file,
output_node_names=",".join(output_node_names))
#convert and test numerical accuracy with CoreML
    self._test_coreml_conversion(model_dir, frozen_model_file, coreml_model_file,
output_node_names, input_tensor_shapes, one_dim_seq_flags,
feed_dict, tf_result, delta, use_cpu_only)
def _test_tf_model_constant(self, graph, input_tensor_shapes, output_node_names,
data_mode='random', delta=1e-2, use_cpu_only=False,
one_dim_seq_flags=None):
""" Common entry to testing routine for graphs that have no variables.
graph - defined TensorFlow graph.
input_tensor_shapes - dict str:shape for each input (placeholder)
output_node_names - output_node_names, a list of strings
output_tensor_names - output tensor names, a list of strings, usually
just output_node_names each appended with ':0'
"""
model_dir = tempfile.mkdtemp()
frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
with tf.Session(graph = graph) as sess:
# initialize
sess.run(tf.global_variables_initializer())
# prepare the tensorflow inputs
feed_dict = {}
for in_tensor_name in input_tensor_shapes:
in_tensor_shape = input_tensor_shapes[in_tensor_name]
feed_dict[in_tensor_name] = _generate_data(in_tensor_shape, data_mode)
# run the result
fetches = [graph.get_operation_by_name(name).outputs[0] for name in \
output_node_names]
tf_result = sess.run(fetches, feed_dict=feed_dict)
#save the frozen .pb
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
        output_node_names #The output node names are used to select the useful nodes
)
with tf.gfile.GFile(frozen_model_file, "wb") as f:
f.write(output_graph_def.SerializeToString())
#convert and test numerical accuracy with CoreML
    self._test_coreml_conversion(model_dir, frozen_model_file, coreml_model_file,
output_node_names, input_tensor_shapes, one_dim_seq_flags,
feed_dict, tf_result, delta, use_cpu_only)
class TFSimpleNetworkTest(TFNetworkTest):
def test_toy(self):
# Define your TF graph here
graph = tf.Graph()
with graph.as_default() as g:
# matrix1 is input of shape (Batch=1,Channels=2)
matrix1 = tf.placeholder(tf.float32, shape=[1,2], name="test_toy/input")
matrix2 = tf.Variable(tf.truncated_normal([2,1]))
product = tf.matmul(matrix1, matrix2, name = "test_toy/product")
saver = tf.train.Saver()
self._test_tf_model(graph, {"test_toy/input:0":[1,2]},
["test_toy/product"], delta=1e-2)
def test_linear(self):
graph = tf.Graph()
with graph.as_default() as g:
# placeholder constructor returns a tensor not an op
x = tf.placeholder(tf.float32, shape=[None,20], name="test_linear/input")
# Make a redundant tensor. It should get trimmed
gt = tf.placeholder(tf.float32, shape=[None,10])
W = tf.Variable(tf.ones([20,10]))
b = tf.Variable(tf.ones([10]))
y = tf.matmul(x,W) + b
output_name = [y.op.name]
# not batched
self._test_tf_model(graph, {"test_linear/input:0":[1,20]},
output_name, delta=1e-2)
# batched
self._test_tf_model(graph, {"test_linear/input:0":[8,20]},
output_name, delta=1e-2)
def test_log(self):
graph = tf.Graph()
with graph.as_default() as g:
# placeholder constructor returns a tensor not an op
x = tf.placeholder(tf.float32, shape=[None,20], name="test_log/input")
# Make a redundant tensor. It should get trimmed
gt = tf.placeholder(tf.float32, shape=[None,10])
W = tf.Variable(tf.ones([20,10]))
b = tf.Variable(tf.ones([10]))
y = tf.log(tf.matmul(x,W) + b)
output_name = [y.op.name]
self._test_tf_model(graph, {"test_log/input:0":[1,20]},
output_name, delta=1e-2)
def test_simple_convnet(self):
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
graph = tf.Graph()
with graph.as_default() as g:
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.placeholder(tf.float32, shape=[None,28,28,1],
name="test_simple_conv/input")
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
output_name = [h_pool2.op.name]
self._test_tf_model(graph,
{"test_simple_conv/input:0":[1,28,28,1]},
output_name, delta=1e-2)
def test_convnet(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_convnet/input")
W_conv1 = tf.Variable(tf.truncated_normal([3,3,3,2], stddev=0.3))
h_conv1 = tf.nn.conv2d(x_image,W_conv1, strides=[1,1,1,1], padding='SAME')
h_conv1_flat = tf.reshape(h_conv1, [-1, 8*8*2])
W_fc1 = tf.Variable(tf.truncated_normal([8*8*2,4], stddev=0.3))
h_fc1 = tf.matmul(h_conv1_flat, W_fc1)
output_name = [h_fc1.op.name]
# not batched
self._test_tf_model(graph,
{"test_convnet/input:0":[1,8,8,3]}, output_name, delta=1e-2)
# batched
self._test_tf_model(graph,
{"test_convnet/input:0":[10,8,8,3]}, output_name, delta=1e-2)
def test_reduce_max(self):
graph = tf.Graph()
with graph.as_default() as g:
# placeholder constructor returns a tensor not an op
x = tf.placeholder(tf.float32, shape=[None,20],
name="test_reduce_max/input")
W = tf.Variable(tf.ones([20,10]))
y = tf.matmul(x,W)
output = tf.reduce_max(y, axis=-1)
output_name = [output.op.name]
# not batched
self._test_tf_model(graph, {"test_reduce_max/input:0":[1,20]},
output_name, delta=1e-2)
def test_pad_conv_fuse(self):
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None,32,18,3],
name="test_pad_conv/input")
W = tf.Variable(tf.truncated_normal([9,9,3,5], stddev=1))
paddings = tf.constant([[0, 0], [5,5], [1,1], [0, 0]])
x_pad = tf.pad(x, paddings, "CONSTANT")
output = tf.nn.conv2d(x_pad,W,strides=[1,1,1,1], padding='VALID')
output_name = [output.op.name]
self._test_tf_model(graph,
{"test_pad_conv/input:0":[1,32,18,3]}, output_name, delta=.05)
def test_dilated_conv(self):
    #params: (Hin, Win, K, dilation)
Cin = 3
Cout = 5
params = [(32,18,3,3),
(14,13,3,4),
(14,19,1,3),
(17,18,5,3),
(14,20,3,3)]
for param in params:
Hin, Win, K, d = param
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None,Hin,Win,Cin],
name="test_pad_conv/input")
W = tf.Variable(tf.truncated_normal([K,K,Cin,Cout], stddev=1))
output = tf.nn.convolution(x,W,strides=[1,1], padding='VALID',
dilation_rate=[d,d])
output_name = [output.op.name]
self._test_tf_model(graph,
{"test_pad_conv/input:0":[1,Hin,Win,Cin]}, output_name, delta=.05)
class TFSingleLayersTest(TFNetworkTest):
""" Small models from tensorflow.layers
"""
def test_dense(self):
# dense layer with some activation
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None,10],
name="test_dense/input")
y = tf.layers.dense(inputs=x, units=16, activation=tf.sigmoid)
output_name = [y.op.name]
self._test_tf_model(graph,
{"test_dense/input:0":[1,10]}, output_name, delta=1e-2)
def test_dense_concat(self):
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None, 10],
name="test_dense/input")
y = tf.layers.dense(inputs=x, units=16, activation=tf.nn.relu)
z1 = tf.layers.dense(inputs=y, units=20, activation=tf.nn.relu)
z2 = tf.layers.dense(inputs=y, units=20, activation=tf.nn.relu)
z3 = tf.layers.dense(inputs=y, units=20, activation=tf.nn.relu)
z = tf.concat([z1,z2,z3], axis=1)
output_name = [z.op.name]
self._test_tf_model(graph,
{"test_dense/input:0": [1, 10]}, output_name, delta=1e-2)
def test_conv2d(self):
# conv layer with "fused activation"
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2d/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[5,5],
padding='same', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2d_valid(self):
# conv layer with "fused activation"
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2d_valid/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='valid', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d_valid/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2d_stride2(self):
# conv layer with "fused activation"
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2d_stride2/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='valid', strides=(2,2))
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d_stride2/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2d_dilated(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,32,32,3],
name="test_conv2d_dilated/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='valid', dilation_rate=(3,4))
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d_dilated/input:0":[1,32,32,3]}, output_name, delta=1e-2)
def test_conv2dt(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2dt/input")
conv1 = tf.layers.conv2d_transpose(inputs=x_image, filters=4,
kernel_size=[3,3], padding='same', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2dt/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2dt_valid(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2dt_valid/input")
conv1 = tf.layers.conv2d_transpose(inputs=x_image, filters=4,
kernel_size=[3,3], padding='valid', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2dt_valid/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2dt_stride2(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2dt_stride2/input")
conv1 = tf.layers.conv2d_transpose(inputs=x_image, filters=4,
kernel_size=[3,3], padding='valid', strides=(2,2))
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2dt_stride2/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2d_avepool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_avepool/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
pool1 = tf.layers.average_pooling2d(inputs=conv1, pool_size=[2, 2],
strides=2)
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv2d_avepool/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_conv2d_maxpool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_maxpool/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[3, 3], strides=1,
padding='same')
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv2d_maxpool/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_conv2d_bn(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_bn/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
bn1 = tf.layers.batch_normalization(inputs=conv1, axis=-1)
output_name = [bn1.op.name]
self._test_tf_model(graph,
{"test_conv2d_bn/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_conv2d_spatial_bn(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_bn/input")
bn1 = tf.layers.batch_normalization(inputs=x_image, axis=2)
output_name = [bn1.op.name]
self._test_tf_model(graph,
{"test_conv2d_bn/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_separable_conv2d(self):
# conv layer with "fused activation"
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_separable_conv2d/input")
conv1 = tf.layers.separable_conv2d(inputs=x_image, filters=4,
kernel_size=[3,3], padding='valid', depth_multiplier=2)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_separable_conv2d/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv1d(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=3,
padding='valid', use_bias=True)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv1d/input:0":[1,8,3]}, output_name, data_mode='linear',
delta=.05)
def test_conv1d_dense(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d_dense/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=3,
padding='same')
conv1_flat = tf.reshape(conv1,[-1,8*2])
y = tf.layers.dense(inputs=conv1_flat, units=6, activation=tf.nn.relu)
output_name = [y.op.name]
# not batched
self._test_tf_model(graph,
{"test_conv1d_dense/input:0":[1,8,3]}, output_name, delta=1e-2)
# batched
self._test_tf_model(graph,
{"test_conv1d_dense/input:0":[10,8,3]}, output_name, delta=1e-2)
def test_conv1d_avepool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d_avepool/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=5,
padding='same')
pool1 = tf.layers.average_pooling1d(inputs=conv1, pool_size=2,
strides=2)
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv1d_avepool/input:0":[1,8,3]}, output_name, delta=1e-2)
def test_conv1d_maxpool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d_maxpool/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=3,
padding='same')
pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2,
strides=1)
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv1d_maxpool/input:0":[1,8,3]}, output_name, delta=1e-2)
def test_conv2d_resize_bilinear(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_resize_bl/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=3, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
bl1 = tf.image.resize_bilinear(images=conv1, size=[32,32])
output_name = [bl1.op.name]
self._test_tf_model(graph,
{"test_conv2d_resize_bl/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_concat_constants(self):
graph = tf.Graph()
x, y = np.meshgrid( | np.linspace(0., 1., 256) | numpy.linspace |
import matplotlib.pyplot as plt
import numpy as np
from numpy import ones, zeros, linspace, diag, eye, abs, mean, log, sqrt, tile, meshgrid, r_, diagflat, reshape, sign, \
where, array, repeat, newaxis
from numpy import sum as npsum, max as npmax, min as npmin
from numpy.core.umath import minimum
from numpy.linalg import eig, norm, solve, pinv
from scipy.signal import lfilter as filter
plt.style.use('seaborn')
def FitMultivariateGarch(returns, demean=1, eps=0, df=500):
## Estimation of multivariate GARCH models
# INPUTS
    # returns : [matrix] (n_ x t_end) returns, so rows correspond to assets and columns to observations in time
# demean : [scalar] specifies whether returns should be demeaned (if demean = 1) or not to estimate the model default value is 1
# eps : [scalar] used in enforcing a_ii + b_ii <= 1 - eps the default value is zero
# df : [scalar] degree of freedom for the t-distribution the default value is 500 to make it, basically, normal
# OPS
# mu : [vector]
# ATMF : [matrix] coefficient matrix A-tilde (in the notation of the paper)
# BTMF : [matrix] coefficient matrix B-tilde (in the notation of the paper)
# CTMF : [matrix] coefficient matrix C-tilde (in the notation of the paper)
# Hhat : [matrix] forecasted conditional covariance matrix
# NOTE
# Initially written by <NAME> and <NAME>
if eps < 0:
raise ValueError('eps must be a (small) positive number')
# Initialization
[n_, t_] = returns.shape
if 1 == demean:
mu = mean(returns, 1, keepdims=True)
returns = returns - tile(mu, (1, t_))
S = returns @ returns.T / (t_ - 1)
x = returns
A = zeros((n_, n_))
B = zeros((n_, n_))
C = zeros((n_, n_))
# Rescale Data
scale = sqrt(mean(x ** 2, 1, keepdims=True))
x = x / tile(scale, (1, t_))
# Estimation of On-Diagonal Elements
h = zeros((n_, t_))
for i in range(n_):
# Likelihood Maximization
q0, q1, q2 = garch1f4(x[i].T, eps, df)[0]
A[i, i] = q1
B[i, i] = q2
C[i, i] = q0
h[i, :] = \
filter([0, q1], [1, -q2], x[i, :] ** 2 * (df - 2) / df, zi=array([mean(x[i, :] ** 2) * (df - 2) / df]))[0] \
+ filter([0, q0], [1, -q2], ones(t_))
# First-step Estimation of Off-Diagonal Elements
for i in range(n_):
for j in range(i + 1, n_):
# Likelihood Maximization
theta = garch2f8(x[i, :] * x[j, :], C[i, i], A[i, i], B[i, i], x[i, :] ** 2, h[i, :], C[j, j], A[j, j],
B[j, j], x[j, :] ** 2, h[j, :], df)
A[i, j] = theta[1]
B[i, j] = theta[2]
C[i, j] = theta[0]
A[j, i] = A[i, j]
B[j, i] = B[i, j]
C[j, i] = C[i, j]
# Transformation of Coefficient Matrices
ATMF = minfro(A)
BTMF = minfro(B)
CTMF = minfro(C / (1 - B)) * (1 - BTMF)
# Rescale
# C = C * (scale*scale.T)
CTMF = CTMF * (scale * scale.T)
# Forecast of Conditional Covariance Matrix
Hhat = zeros((n_, n_))
for i in range(n_):
for j in range(n_):
hSeries = filter([0, ATMF[i, j]], [1, -BTMF[i, j]], returns[i, :].T * returns[j, :].T, zi=array([S[i, j]]))[
0] + \
filter([0, CTMF[i, j]], [1, -BTMF[i, j]], ones(t_))
Hhat[i, j] = hSeries[t_ - 1]
return mu, ATMF, BTMF, CTMF, Hhat
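# Illustrative usage sketch (an addition, not part of the original routine): fit the
# bivariate model to simulated i.i.d. returns; real applications would pass actual asset
# returns with assets on the rows and observations on the columns.
def _example_fit_multivariate_garch():
    np.random.seed(0)
    simulated_returns = 0.01 * np.random.randn(2, 500)  # 2 assets, 500 observations
    mu, ATMF, BTMF, CTMF, Hhat = FitMultivariateGarch(simulated_returns, demean=1)
    return mu, Hhat  # sample means and one-step-ahead covariance forecast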
def garch1f4(x, eps, df):
## Fit a GARCH(1,1) model with student-t errors
# INPUTS
# x : [vector] (T x 1) data generated by a GARCH(1,1) process
# OPS
# q : [vector] (4 x 1) parameters of the GARCH(1,1) process
# qerr : [vector] (4 x 1) standard error of parameter estimates
# hf : [scalar] current conditional heteroskedasticity estimate
# hferr : [scalar] standard error on hf
# NOTE
# o Uses a conditional t-distribution with fixed degrees of freedom
# o Originally written by <NAME>, 4/28/1997
# o Difference with garch1f: errors come from the score alone
# Parameters
gold = (1 + sqrt(5)) / 2 # step size increment
tol1 = 1e-7 # for termination criterion
tol2 = 1e-7 # for closeness to boundary
big = 2 # for making the hessian negative definite
maxiter = 50 # maximum number of iterations
n = 30 # number of points on the grid
# Rescale
y = (x.flatten() - mean(x.flatten())) ** 2
t = len(y)
scale = sqrt(mean(y ** 2))
y = y / scale
s = mean(y)
# Grid search
[ag, bg] = meshgrid(linspace(0, 1 - eps, n), linspace(0, 1 - eps, n))
cg = np.maximum(s * (1 - ag - bg), 0)
likeg = -np.Inf * ones((n, n))
for i in range(n):
for j in range(n - i):
h = filter(array([0, ag[i, j]]), array([1, -bg[i, j]]), y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter(array([0, cg[i, j]]), array([1, -bg[i, j]]), ones(t))
likeg[i, j] = -npsum(log(h) + (df + 1) * log(1 + y / h / df))
maxlikeg = npmax(likeg)
    maxima = where(likeg == maxlikeg)
# Initialize optimization
a = r_[cg[maxima], ag[maxima], bg[maxima]]
best = 0
da = 0
# term = 1
# negdef = 0
iter = 0
# Begin optimization loop
while iter < maxiter:
iter = iter + 1
        # New parameter
a = a + gold ** best * da
# Conditional variance
h = filter([0, a[1]], [1, -a[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, a[0]], [1, -a[2]], ones(t))
# Likelihood
if (any(a < 0) or ((a[1] + a[2]) > 1 - eps)):
like = -np.Inf
else:
like = -npsum(log(h) + (df + 1) * log(1 + y / h / df))
# Gradient
GG = r_['-1', filter([0, 1], [1, -a[2]], ones(t))[..., newaxis],
filter([0, 1], [1, -a[2]], y * (df - 2) / df)[..., newaxis],
filter([0, 1], [1, -a[2]], h)[..., newaxis]]
g1 = ((df + 1) * (y / (y + df * h)) - 1) / h
G = GG * repeat(g1.reshape(-1, 1), 3, axis=1)
gra = npsum(G, axis=0)
# Hessian
GG2 = GG[:, [0, 1, 2, 0, 1, 2, 0, 1, 2]] * GG[:, [0, 0, 0, 1, 1, 1, 2, 2, 2]]
g2 = -((df + 1) * (y / (y + df * h)) - 1) / h ** 2 - (df * (df + 1)) * (y / (y + df * h) ** 2 / h)
HH = zeros((t, 9))
HH[:, 2] = filter([0, 1], [1, -a[2]], GG[:, 0])
HH[:, 6] = HH[:, 2]
HH[:, 5] = filter([0, 1], [1, -a[2]], GG[:, 1])
HH[:, 7] = HH[:, 5]
HH[:, 8] = filter([0, 2], [1, -a[2]], GG[:, 2])
H = GG2 * repeat(g2.reshape(-1, 1), 9, axis=1) + HH * repeat(g1.reshape(-1, 1), 9, axis=1)
hes = reshape(npsum(H, axis=0), (3, 3), 'F')
# Negative definite
d, u = eig(hes)
# d = diagflat(d)
if any(d > 0):
negdef = 0
            d = minimum(d, max(d[d < 0]) / big)
hes = u @ diagflat(d) @ u.T
else:
negdef = 1
# Direction
da = -gra.dot(pinv(hes))
# Termination criterion
term = da @ gra.T
if (term < tol1) and negdef:
break
# Step search
best = 0
newa = a + gold ** (best - 1) * da
if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
left = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
left = -sum(log(h) + (df + 1) * log(1 + y / h / df))
newa = a + gold ** best * da
if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
center = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
center = -sum(log(h) + (df + 1) * log(1 + y / h / df))
newa = a + gold ** (best + 1) * da
if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
right = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
right = -sum(log(h) + (df + 1) * log(1 + y / h / df))
if all(like > array([left, center, right])) or all(left > array([center, right])):
while True:
best = best - 1
center = left
newa = a + gold ** (best - 1) * da
if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
left = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
left = -sum(log(h) + (df + 1) * log(1 + y / h / df))
if all(center >= array([like, left])):
break
elif all(right > array([left, center])):
while True:
best = best + 1
center = right
newa = a + gold ** (best + 1) * da
if (any(newa < 0) or (newa[1] + newa[2]) > 1 - eps):
right = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
right = -npsum(log(h) + (df + 1) * log(1 + y / h / df))
if center > right:
break
# If stuck at boundary then stop
if (center == like) and (any(a < tol2) or (a[1] + a[2]) > 1 - tol2):
break
# End of optimization loop
a[a < tol2] = zeros(len(a[a < tol2]))
if a[1] + a[2] > 1 - tol2:
if a[1] < 1 - tol2:
a[1] = a[1] + (1 - a[1] - a[2])
else:
a[2] = a[2] + (1 - a[1] - a[2])
# Estimation error and volatility forecast
# aerr=inv(G.T@G)
tmp = (G.T @ G)
    aerr = pinv(tmp)
hf = a[0] + a[1] * y[t - 1] * (df - 2) / df + a[2] * h[t - 1]
gf = r_[1, y[t - 1], h[t - 1]] + a[2] * GG[t - 1, :]
hferr = gf @ aerr @ gf.T
    aerr = diag(aerr)
# Revert to original scale
a[0] = a[0] * scale
aerr[0] = aerr[0] * scale ** 2
hf = hf * scale
hferr = hferr * scale ** 2
aerr = sqrt(aerr)
hferr = sqrt(hferr)
q = a
qerr = aerr
return q, qerr, hf, hferr
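# Illustrative usage sketch (an addition): fit the univariate GARCH(1,1) filter to one
# simulated return series; q holds the estimated (c, a, b) coefficients and hf the
# one-step-ahead conditional variance forecast.
def _example_garch1f4():
    np.random.seed(1)
    x = 0.02 * np.random.randn(1000)  # simulated daily returns
    q, qerr, hf, hferr = garch1f4(x, eps=0, df=500)
    return q, hf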
def garch2f8(y, c1, a1, b1, y1, h1, c2, a2, b2, y2, h2, df):
## Off-diagonal parameter estimation in bivariate GARCH(1,1) when diagonal parameters are given.
# INPUTS
# y : [vector] (T x 1) data generated by a GARCH(1,1) process
# OPS
# q : [vector] (4 x 1) parameters of the GARCH(1,1) process
# qerr : [vector] (4 x 1) standard error of parameter estimates
# hf : [scalar] current conditional heteroskedasticity estimate
# hferr : [scalar] standard error on hf
# NOTE
# o Originally written by <NAME>, 4/28/1997
# o Uses a conditional t-distribution with fixed degrees of freedom
# o Steepest Ascent on boundary, Hessian off boundary, no grid search
# Parameters
gold = (1 + sqrt(5)) / 2 # step size increment
tol1 = 1e-7 # for termination criterion
tol2 = 1e-7 # for closeness to boundary
big = 2 # for making the hessian negative definite
maxiter = 50 # maximum number of iterations
# n=30 # number of points on the grid
# Prepare
t = len(y)
y1 = y1.flatten()
y2 = y2.flatten()
y = y.flatten()
s = mean(y)
# s1=mean((y1))
# s2=mean((y2))
h1 = h1.flatten()
h2 = h2.flatten()
# Bounds
low = r_[-sqrt(c1 * c2), 0, 0] + tol2
high = r_[sqrt(c1 * c2), sqrt(a1 * a2), sqrt(b1 * b2)] - tol2
# Starting Point
a0 = 0.9 * sqrt(a1 * a2)
b0 = 0.9 * sqrt(b1 * b2)
c0 = mean(y) * (1 - a0 - b0) * (df - 2) / df
c0 = sign(c0) * min(abs(c0), 0.9 * sqrt(c1 * c2))
# Initialize optimization
a = r_[c0, a0, b0]
best = 0
da = 0
# term=1
# negdef=0
iter = 0
# Begin optimization loop
while iter < maxiter:
iter = iter + 1
# New parameter
# olda = a
a = a + gold ** best * da
# Conditional variance
h = filter([0, a[1]], [1, -a[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, a[0]], [1, -a[2]], ones(t))
d = h1 * h2 - h ** 2
z = h2 * y1 + h1 * y2 - 2 * h * y
# Likelihood
if (any(a < low) or any(a > high)):
like = -np.Inf
else:
# like=-sum(log(h)+y/h))
# like=-sum(log(h)+(df+1)*log(1+y/h/df))
if any(d <= 0) or any(1 + z / d / df <= 0):
like = -np.Inf
else:
like = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2
# Gradient
GG = r_['-1', filter([0, 1], [1, -a[2]], ones(t))[..., newaxis],
filter([0, 1], [1, -a[2]], y * (df - 2) / df)[..., newaxis],
filter([0, 1], [1, -a[2]], h)[..., newaxis]]
g1 = h / d + (2 + df) * y / (z + d * df) - (2 + df) * h * z / (z + d * df) / d
G = GG * repeat(g1.reshape(-1, 1), 3, axis=1)
gra = npsum(G, axis=0)
# Hessian
GG2 = GG[:, [0, 1, 2, 0, 1, 2, 0, 1, 2]] * GG[:, [0, 0, 0, 1, 1, 1, 2, 2, 2]]
g2 = 1 / d + 2 * h ** 2 / d ** 2 - (2 + df) * y / (z + d * df) ** 2 * (-2 * y - 2 * df * h) \
- (2 + df) * z / (z + d * df) / d + 2 * (2 + df) * h * y / (z + d * df) / d \
+ (2 + df) * h * z / (z + d * df) ** 2 / d * (-2 * y - 2 * df * h) \
- 2 * (2 + df) * h ** 2 * z / (z + d * df) / d ** 2
HH = zeros((t, 9))
HH[:, 2] = filter([0, 1], [1, -a[2]], GG[:, 0])
HH[:, 6] = HH[:, 2]
HH[:, 5] = filter([0, 1], [1, -a[2]], GG[:, 1])
HH[:, 7] = HH[:, 5]
HH[:, 8] = filter([0, 2], [1, -a[2]], GG[:, 2])
H = GG2 * repeat(g2.reshape(-1, 1), 9, axis=1) + HH * repeat(g1.reshape(-1, 1), 9, axis=1)
hes = reshape(npsum(H, axis=0), (3, 3), 'F')
# Negative definite
val, u = eig(hes)
if all(val > 0):
hes = -eye(3)
negdef = 0
elif any(val > 0):
negdef = 0
val = minimum(val, max(val[val < 0]) / big)
hes = u @ diagflat(val) @ u.T
else:
negdef = 1
# Steepest Ascent or Newton
if any(a == low) or any(a == high):
da = -((gra @ gra.T) / (gra @ hes @ gra.T)) * gra
else:
da = -gra.dot(pinv(hes))
# Termination criterion
term = da @ gra.T
if ((term < tol1) and negdef):
break
# If you are on the boundary and want to get out, slide along
da[(a == low) & (da < 0)] = zeros(da[(a == low) & (da < 0)].shape)
da[(a == high) & (da > 0)] = zeros(da[(a == high) & (da > 0)].shape)
# If you are stuck in a corner, terminate too
if all(da == 0):
break
# Go no further than next boundary
hit = r_[(low[da != 0] - a[da != 0]) / da[da != 0],
(high[da != 0] - a[da != 0]) / da[da != 0]]
hit = hit[hit > 0]
da = min(r_[hit, 1]) * da
# Step search
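        # Evaluate the likelihood one gold-ratio step behind, at, and ahead of the current
        # step length (gold**(best-1), gold**best, gold**(best+1) times da), then walk
        # `best` down or up until the middle point is at least as good as its neighbours.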
best = 0
newa = a + gold ** (best - 1) * da
if (any(newa < low) or any(newa > high)):
left = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
d = h1 * h2 - h ** 2
z = h2 * y1 + h1 * y2 - 2 * h * y
if any(d <= 0) or any(1 + z / d / df <= 0):
left = -np.Inf
else:
left = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2
newa = a + gold ** best * da
if (any(newa < low) or any(newa > high)):
center = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
d = h1 * h2 - h ** 2
z = h2 * y1 + h1 * y2 - 2 * h * y
if any(d <= 0) or any(1 + z / d / df <= 0):
center = -np.Inf
else:
center = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2
newa = a + gold ** (best + 1) * da
if (any(newa < low) or any(newa > high)):
right = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
d = h1 * h2 - h ** 2
z = h2 * y1 + h1 * y2 - 2 * h * y
if any(d <= 0) or any(1 + z / d / df <= 0):
right = -np.Inf
else:
right = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2
if all(like > array([left, center, right])) or all(left > array([center, right])):
while True:
best = best - 1
center = left
newa = a + gold ** (best - 1) * da
if (any(newa < low) or any(newa > high)):
left = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
d = h1 * h2 - h ** 2
z = h2 * y1 + h1 * y2 - 2 * h * y
if any(d <= 0) or any(1 + z / d / df <= 0):
left = -np.Inf
else:
left = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2
if all(center >= [like, left]):
break
elif all(right > array([left, center])):
while True:
best = best + 1
center = right
newa = a + gold ** (best + 1) * da
if (any(newa < low) or any(newa > high)):
right = -np.Inf
else:
h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
+ filter([0, newa[0]], [1, -newa[2]], ones(t))
d = h1 * h2 - h ** 2
z = h2 * y1 + h1 * y2 - 2 * h * y
if any(d <= 0) or any(1 + z / d / df <= 0):
right = -np.Inf
else:
right = -npsum(log(d) + (2 + df) * log(1 + z / d / df)) / 2
if center > right:
break
q = a
return q
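# Hedged usage sketch (not part of the original code): given univariate GARCH(1,1) fits
# (c1, a1, b1, h1) and (c2, a2, b2, h2) for two demeaned series x1 and x2, the
# off-diagonal recursion could be estimated roughly as
#
#   q12 = garch2f8(x1 * x2, c1, a1, b1, x1 ** 2, h1, c2, a2, b2, x2 ** 2, h2, df)
#
# The cross-product / squared-series convention is an assumption inferred from the
# bivariate t likelihood terms (d = h1*h2 - h**2, z = h2*y1 + h1*y2 - 2*h*y) above,
# not documented behaviour.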
def minfro(A):
# INPUTS
# A : [matrix] an indefinite symmetric matrix with non-negative diagonal elements
# OPS
# XXX : [matrix] positive semi-definite matrix with same diagonal elements as A that is closest
# to A according to the Frobenius norm
# NOTE
# o Written initially by <NAME> (1997)
if any(diag(A) < 0):
raise ValueError('Diagonal Elements Must Be Non-Negative!')
elif npsum(A != A.T) != 0:
raise ValueError('Matrix Must Be Symmetric!')
elif all(eig(A)[0] >= 0):
XXX = A
else:
# if things go wrong make rho bigger and wait longer
rho = 0.75
tol = 3e-6 # tolerance
maxj = 10 # max number of iterations
n = A.shape[0]
# [n, nn] = A.shape
M = diagflat(diag(A)) # initialize with diagonal
# [n, nn] = A.shape
oldnorm = norm(M - A, ord='fro')
oldnormj = oldnorm
        normj = zeros(maxj + 1)  # Frobenius distance after each sweep
        normj[0] = oldnorm
j = 1
incmax = 1e32 # just to enter the loop
while ((j < maxj) and (incmax > tol)):
incmax = 0
for i in range(n):
a = r_[A[:i, i], A[i + 1:n, i]]
m = r_[M[:i, i], M[i + 1:n, i]]
                aii = A[i, i]
                b = a - rho * m
# Newton's step
x = newton(M, i, b, m, aii, n, rho)
                P = eye(n)
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper/utility functions that a tf-transform implementation would find handy."""
import os
import re
from typing import Callable, Dict, List, Mapping, Optional, Set, Tuple, Union
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import annotators
from tensorflow_transform import common_types
from tensorflow_transform import graph_context
from tensorflow_transform import graph_tools
from tensorflow_transform import schema_inference
from tensorflow_transform import tf2_utils
from tensorflow_transform import tf_utils
from tensorflow_transform.output_wrapper import TFTransformOutput
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.saved import saved_transform_io_v2
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
from tfx_bsl.tfxio import tensor_to_arrow
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
# pylint: enable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
_CompositeInstanceComponentType = np.ndarray
_CompositeComponentType = List[_CompositeInstanceComponentType]
_CACHED_EMPTY_ARRAY_BY_DTYPE = {}
_VALID_SCOPE_REGEX = re.compile('^[A-Za-z0-9]*$')
_INVALID_SCOPE_CHAR = re.compile('[^A-Za-z0-9_.\\-/>]')
METADATA_DIR_NAME = '.tft_metadata'
def _get_empty_array(dtype):
if dtype not in _CACHED_EMPTY_ARRAY_BY_DTYPE:
empty_array = np.array([], dtype)
empty_array.setflags(write=False)
_CACHED_EMPTY_ARRAY_BY_DTYPE[dtype] = empty_array
return _CACHED_EMPTY_ARRAY_BY_DTYPE[dtype]
def batched_placeholders_from_specs(specs):
"""Returns placeholders for the given tf.TypeSpecs or feature specs.
Args:
specs: a Dict[Text, Union[tf.TypeSpec, FeatureSpec]]. Note that the values
in this dict must be of the same type. Mixing is not allowed.
Returns:
A dictionary from strings to `Tensor`, `SparseTensor`s, or `RaggedTensor`s.
Raises:
ValueError: when the TypeSpec or feature spec has an unsupported dtype.
"""
if not (all([_is_feature_spec(s) for s in specs.values()]) or
all([isinstance(s, tf.TypeSpec) for s in specs.values()])):
raise TypeError('Specs must be all tf.TypeSpecs or feature specs. '
'Mixing is not allowed. Got: {}'.format(specs))
result = {}
for name, spec in specs.items():
if isinstance(spec, tf.RaggedTensorSpec):
# TODO(b/159717195): clean up protected-access
spec_dtype = spec._dtype # pylint: disable=protected-access
else:
spec_dtype = spec.dtype
if spec_dtype not in (tf.int64, tf.float32, tf.string):
raise ValueError('Feature {} ({}, {}) had invalid dtype'.format(
name, spec, type(spec)))
if isinstance(spec, tf.TypeSpec):
result[name] = _batched_placeholder_from_typespec(name, spec)
else:
result[name] = _batched_placeholder_from_feature_spec(name, spec)
return result
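# Illustrative sketch (feature names are made up): in graph mode,
#
#   specs = {'age': tf.io.FixedLenFeature([], tf.int64),
#            'terms': tf.io.VarLenFeature(tf.string)}
#   with tf.compat.v1.Graph().as_default():
#     inputs = batched_placeholders_from_specs(specs)
#
# produces a dense placeholder of shape [None] for 'age' and a sparse placeholder of
# shape [None, None] for 'terms'.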
def _is_feature_spec(spec):
if isinstance(
spec, (tf.io.VarLenFeature, tf.io.SparseFeature, tf.io.FixedLenFeature)):
return True
return common_types.is_ragged_feature(spec)
def _sanitize_scope_name(name):
scope_name = _INVALID_SCOPE_CHAR.sub('_', name)
if not _VALID_SCOPE_REGEX.match(scope_name):
scope_name = 'F_{}'.format(scope_name)
return scope_name
def _batched_placeholder_from_typespec(name, typespec):
"""Creates a batched placeholder from a tf.TypeSpec."""
if isinstance(typespec,
(tf.TensorSpec, tf.SparseTensorSpec, tf.RaggedTensorSpec)):
sanitized_name = _sanitize_scope_name(name)
with tf.name_scope(sanitized_name):
return tf.nest.map_structure(
lambda tspec: tf.raw_ops.Placeholder( # pylint: disable=g-long-lambda
dtype=tspec.dtype,
shape=tspec.shape,
name=sanitized_name),
typespec,
expand_composites=True)
raise ValueError('Unsupported typespec: {}({}) for feature {}'.format(
typespec, type(typespec), name))
def _batched_placeholder_from_feature_spec(name, feature_spec):
"""Creates a batched placeholder from a feature spec."""
scope_name = _sanitize_scope_name(name)
if isinstance(feature_spec, tf.io.FixedLenFeature):
return tf.compat.v1.placeholder(
feature_spec.dtype, [None] + feature_spec.shape, name=scope_name)
elif isinstance(feature_spec, tf.io.VarLenFeature):
return tf.compat.v1.sparse_placeholder(
feature_spec.dtype, [None, None], name=scope_name)
elif isinstance(feature_spec, tf.io.SparseFeature):
shape = [None] + feature_spec.size if isinstance(
feature_spec.size, list) else [None, feature_spec.size]
return tf.compat.v1.sparse_placeholder(
feature_spec.dtype, shape, name=scope_name)
raise ValueError('Unsupported feature spec: {}({}) for feature {}'.format(
feature_spec, type(feature_spec), name))
def _extract_sparse_components(
sparse_value: common_types.SparseTensorValueType
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
if isinstance(sparse_value, tf.SparseTensor):
return (np.asarray(sparse_value.indices), np.asarray(sparse_value.values),
np.asarray(sparse_value.dense_shape))
elif isinstance(sparse_value, tf.compat.v1.SparseTensorValue):
return sparse_value
else:
raise ValueError(
'Expected SparseTensor or SparseTensorValue , but got {}'.format(
sparse_value))
def _get_num_values_per_instance_in_sparse_batch(batch_indices: np.ndarray,
batch_size: int) -> List[int]:
"""Computes the number of values per instance of the batch."""
result = [0] * batch_size
for arr in batch_indices:
result[arr[0]] += 1
return result
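# For example, batch_indices [[0, 0], [0, 3], [2, 1]] with batch_size 3 gives [2, 0, 1]:
# two values in instance 0, none in instance 1, one in instance 2.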
def _decompose_sparse_batch(
sparse_value: common_types.SparseTensorValueType
) -> Tuple[List[_CompositeComponentType], _CompositeComponentType]:
"""Decomposes a sparse batch into a list of sparse instances.
Args:
sparse_value: A `SparseTensor` or `SparseTensorValue` representing a batch
of N sparse instances. The indices of the SparseTensorValue are expected
to be sorted by row order.
Returns:
A tuple (instance_indices, instance_values) where the elements are lists
of N ndarrays representing the indices and values, respectively, of the
instances in the batch. The `instance_indices` include an ndarray per
dimension.
"""
batch_indices, batch_values, batch_shape = _extract_sparse_components(
sparse_value)
batch_size = batch_shape[0]
instance_rank = len(batch_shape) - 1
# Preallocate lists of length batch_size, initialized to empty ndarrays,
# representing the indices and values of instances. We can reuse the return
# value of _get_empty_array here because it is immutable.
instance_values = [_get_empty_array(batch_values.dtype)] * batch_size
instance_indices = [[_get_empty_array(batch_indices.dtype)] * instance_rank
for idx in range(batch_size)]
values_per_instance = _get_num_values_per_instance_in_sparse_batch(
batch_indices, batch_size)
offset = 0
for idx, num_values in enumerate(values_per_instance):
if num_values < 1:
continue
instance_values[idx] = batch_values[offset:offset + num_values]
for dim in range(instance_rank):
# Skipping the first dimension since that is the batch dimension.
instance_indices[idx][dim] = batch_indices[offset:offset + num_values,
dim + 1]
offset += num_values
return instance_indices, instance_values
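# Example: a batch with indices [[0, 1], [0, 3], [1, 0]], values [10, 20, 30] and
# dense_shape [2, 4] decomposes into instance_values [array([10, 20]), array([30])]
# and instance_indices [[array([1, 3])], [array([0])]] (one index array per instance
# dimension).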
def _decompose_varlen_batch(
sparse_value: common_types.SparseTensorValueType
) -> Tuple[_CompositeComponentType, _CompositeComponentType]:
"""Decomposes a sparse batch into a list of sparse/varlen instances.
Args:
sparse_value: A `SparseTensor` or `SparseTensorValue` representing a batch
of N sparse instances. The indices of the SparseTensorValue are expected
to be sorted by row order.
Returns:
A tuple (instance_indices, instance_values) where the elements are lists
of N ndarrays representing the indices and values, respectively, of the
instances in the batch.
Raises:
ValueError: If `sparse_value` is neither `SparseTensor` nor
`SparseTensorValue`.
ValueError: If `sparse_value` contains out-of-order indices.
"""
batch_indices, batch_values, batch_shape = _extract_sparse_components(
sparse_value)
batch_size = batch_shape[0]
instance_rank = len(batch_shape) - 1
# Preallocate lists of length batch_size, initialized to empty ndarrays,
# representing the indices and values of instances. We can reuse the return
# value of _get_empty_array here because it is immutable.
instance_values = [_get_empty_array(batch_values.dtype)] * batch_size
instance_indices = [_get_empty_array(batch_indices.dtype)] * batch_size
# Iterate over the rows in the batch. At each row, consume all the elements
# that belong to that row.
current_offset = 0
for current_row in range(batch_size):
start_offset = current_offset
# Scan forward until we reach an element that does not belong to the
# current row.
while current_offset < len(batch_indices):
row = batch_indices[current_offset][0]
if row == current_row:
# This element belongs to the current row.
current_offset += 1
elif row > current_row:
# We've reached the end of the current row.
break
else:
raise ValueError('Encountered out-of-order sparse index: {}.'.format(
batch_indices[current_offset]))
if current_offset == start_offset:
# If the current row is empty, leave the default value, which is an
# empty array.
pass
else:
instance_indices[current_row] = batch_indices[start_offset:current_offset,
1:]
if instance_rank == 1:
# In this case indices will have length 1, so for convenience we
# reshape from [-1, 1] to [-1].
current_row_indices = instance_indices[current_row] # type: np.ndarray
instance_indices[current_row] = current_row_indices.reshape([-1])
instance_values[current_row] = batch_values[start_offset:current_offset]
return instance_indices, instance_values
def _handle_varlen_batch(tensor_or_value: common_types.SparseTensorValueType,
name: str) -> _CompositeComponentType:
"""Decomposes a varlen tensor value into sparse tensor components."""
instance_indices, instance_values = _decompose_varlen_batch(tensor_or_value)
for indices in instance_indices: # type: np.ndarray
if len(indices.shape) > 1 or np.any(indices != np.arange(len(indices))):
raise ValueError('Encountered a SparseTensorValue that cannot be '
'decoded by ListColumnRepresentation.\n'
'"{}" : {}'.format(name, tensor_or_value))
return instance_values
def _handle_sparse_batch(
tensor_or_value: common_types.SparseTensorValueType,
spec: common_types.FeatureSpecType, name: str
) -> Dict[str, Union[List[_CompositeComponentType], _CompositeComponentType]]:
"""Decomposes a sparse tensor value into sparse tensor components."""
if len(spec.index_key) == 1:
index_keys = spec.index_key[0]
instance_indices, instance_values = _decompose_varlen_batch(tensor_or_value)
else:
index_keys = spec.index_key
instance_indices, instance_values = _decompose_sparse_batch(tensor_or_value)
result = {}
if isinstance(index_keys, list):
assert isinstance(instance_indices, list)
for key, indices in zip(index_keys, zip(*instance_indices)):
result[key] = indices
else:
result[index_keys] = instance_indices
result[spec.value_key] = instance_values
_check_valid_sparse_tensor(instance_indices, instance_values, spec.size, name)
return result
def _get_ragged_instance_component(
component_batch: _CompositeInstanceComponentType, batch_splits: np.ndarray,
instance_idx: int) -> _CompositeInstanceComponentType:
"""Extracts an instance component from a flat batch with given splits."""
instance_begin = batch_splits[instance_idx]
instance_end = batch_splits[instance_idx + 1]
return component_batch[instance_begin:instance_end]
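# E.g. component_batch [a, b, c, d] with batch_splits [0, 1, 1, 4] yields [a] for
# instance 0, an empty slice for instance 1, and [b, c, d] for instance 2.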
def _handle_ragged_batch(tensor_or_value: common_types.RaggedTensorValueType,
spec: common_types.FeatureSpecType,
name: str) -> Dict[str, _CompositeComponentType]:
"""Decomposes a ragged tensor or value into ragged tensor components."""
if isinstance(tensor_or_value, tf.RaggedTensor):
nested_row_splits = tuple(
x.numpy() for x in tensor_or_value.nested_row_splits)
flat_values = tensor_or_value.flat_values.numpy()
elif isinstance(tensor_or_value, tf.compat.v1.ragged.RaggedTensorValue):
nested_row_splits = tensor_or_value.nested_row_splits
flat_values = tensor_or_value.flat_values
else:
raise ValueError('Expected RaggedTensor or RaggedTensorValue , but '
'got {}'.format(tensor_or_value))
result = {}
# The outermost row split represents batch dimension.
batch_splits = nested_row_splits[0]
batch_size = len(batch_splits) - 1
if len(nested_row_splits) != len(spec.partitions) + 1:
raise NotImplementedError(
'Ragged tensors with non-ragged dimensions are not supported, ragged '
'rank of feature "{}" is {}, partitions '
'are {}'.format(name,
len(nested_row_splits) - 1, spec.partitions))
# Iterate over all but batch dimension splits.
for row_splits, partition in zip(nested_row_splits[1:], spec.partitions):
if isinstance(partition, tf.io.RaggedFeature.RowLengths): # pytype: disable=attribute-error
row_lengths = row_splits[1:] - row_splits[:-1]
result[partition.key] = [
_get_ragged_instance_component(row_lengths, batch_splits, idx)
for idx in range(batch_size)
]
else:
raise NotImplementedError(
'Only `RowLengths` partitions of ragged features are supported, got '
'{} for ragged feature "{}"'.format(type(partition), name))
# Translate batch split indices for the current dimension to the
# next dimension.
batch_splits = row_splits[batch_splits]
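    # e.g. batch_splits [0, 2, 3] (rows per instance) combined with row_splits
    # [0, 1, 4, 6] (values per row) gives [0, 4, 6] at the next level.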
# Split flat values according to the innermost dimension batch splits.
result[spec.value_key] = [
_get_ragged_instance_component(flat_values, batch_splits, idx)
for idx in range(batch_size)
]
return result
def to_instance_dicts(schema, fetches):
"""Converts fetches to the internal batch format.
Maps the values fetched by `tf.Session.run` or returned by a tf.function to
the internal batch format.
Args:
schema: A `Schema` proto.
fetches: A dict representing a batch of data, either as returned by
`Session.run` or eager tensors.
Returns:
A list of dicts where each dict is an in-memory representation of an
instance.
Raises:
ValueError: If `schema` is invalid.
"""
batch_dict = {}
batch_sizes = {}
feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
for name, tensor_or_value in fetches.items():
spec = feature_spec[name]
if isinstance(spec, tf.io.FixedLenFeature):
      value = np.asarray(tensor_or_value)
from __future__ import print_function, division, absolute_import
import numpy as np
from scipy import optimize as sciopt
from Bio import Phylo
from treetime import config as ttconf
from treetime import MissingDataError,UnknownMethodError,NotReadyError
from .utils import tree_layout
from .clock_tree import ClockTree
rerooting_mechanisms = ["min_dev", "best", "least-squares"]
deprecated_rerooting_mechanisms = {"residual":"least-squares", "res":"least-squares",
"min_dev_ML": "min_dev", "ML":"least-squares"}
class TreeTime(ClockTree):
"""
TreeTime is a wrapper class to ClockTree that adds additional functionality
such as reroot, detection and exclusion of outliers, resolution of polytomies
using temporal information, and relaxed molecular clock models
"""
def __init__(self, *args,**kwargs):
"""
TreeTime constructor
Parameters
-----------
*args
Arguments to construct ClockTree
**kwargs
Keyword arguments to construct the GTR model
"""
super(TreeTime, self).__init__(*args, **kwargs)
def run(self, root=None, infer_gtr=True, relaxed_clock=None, n_iqd = None,
resolve_polytomies=True, max_iter=0, Tc=None, fixed_clock_rate=None,
time_marginal=False, sequence_marginal=False, branch_length_mode='auto',
vary_rate=False, use_covariation=False, **kwargs):
"""
Run TreeTime reconstruction. Based on the input parameters, it divides
the analysis into semi-independent jobs and conquers them one-by-one,
        gradually optimizing the tree given the temporal constraints and leaf
node sequences.
Parameters
----------
root : str
Try to find better root position on a given tree. If string is passed,
the root will be searched according to the specified method. If none,
use tree as-is.
See :py:meth:`treetime.TreeTime.reroot` for available rooting methods.
infer_gtr : bool
If True, infer GTR model
        relaxed_clock : dict
If not None, use autocorrelated molecular clock model. Specify the
clock parameters as :code:`{slack:<slack>, coupling:<coupling>}` dictionary.
n_iqd : int
If not None, filter tree nodes which do not obey the molecular clock
for the particular tree. The nodes, which deviate more than
:code:`n_iqd` interquantile intervals from the molecular clock
regression will be marked as 'BAD' and not used in the TreeTime
analysis
resolve_polytomies : bool
If True, attempt to resolve multiple mergers
max_iter : int
Maximum number of iterations to optimize the tree
Tc : float, str
If not None, use coalescent model to correct the branch lengths by
introducing merger costs.
If Tc is float, it is interpreted as the coalescence time scale
If Tc is str, it should be one of (:code:`opt`, :code:`const`, :code:`skyline`)
fixed_clock_rate : float
Fixed clock rate to be used. If None, infer clock rate from the molecular clock.
time_marginal : bool
If True, perform a final round of marginal reconstruction of the node's positions.
sequence_marginal : bool, optional
use marginal reconstruction for ancestral sequences
branch_length_mode : str
Should be one of: :code:`joint`, :code:`marginal`, :code:`input`.
If 'input', rely on the branch lengths in the input tree and skip directly
to the maximum-likelihood ancestral sequence reconstruction.
Otherwise, perform preliminary sequence reconstruction using parsimony
algorithm and do branch length optimization
vary_rate : bool or float, optional
redo the time tree estimation for rates +/- one standard deviation.
if a float is passed, it is interpreted as standard deviation,
otherwise this standard deviation is estimated from the root-to-tip regression
use_covariation : bool, optional
default False, if False, rate estimates will be performed using simple
            regression ignoring phylogenetic covariation between nodes. If vary_rate is True,
use_covariation is true by default
**kwargs
Keyword arguments needed by the downstream functions
Returns
-------
        TreeTime error/success code : str
return value depending on success or error
"""
        # register the specified covariation mode
self.use_covariation = use_covariation or (vary_rate and (not type(vary_rate)==float))
if (self.tree is None) or (self.aln is None and self.data.full_length is None):
raise MissingDataError("TreeTime.run: ERROR, alignment or tree are missing")
if (self.aln is None):
branch_length_mode='input'
self._set_branch_length_mode(branch_length_mode)
# determine how to reconstruct and sample sequences
seq_kwargs = {"marginal_sequences":sequence_marginal or (self.branch_length_mode=='marginal'),
"sample_from_profile":"root",
"reconstruct_tip_states":kwargs.get("reconstruct_tip_states", False)}
tt_kwargs = {'clock_rate':fixed_clock_rate, 'time_marginal':False}
tt_kwargs.update(kwargs)
seq_LH = 0
if "fixed_pi" in kwargs:
seq_kwargs["fixed_pi"] = kwargs["fixed_pi"]
if "do_marginal" in kwargs:
time_marginal=kwargs["do_marginal"]
# initially, infer ancestral sequences and infer gtr model if desired
if self.branch_length_mode=='input':
if self.aln:
self.infer_ancestral_sequences(infer_gtr=infer_gtr, **seq_kwargs)
self.prune_short_branches()
else:
self.optimize_tree(infer_gtr=infer_gtr,
max_iter=1, prune_short=True, **seq_kwargs)
avg_root_to_tip = np.mean([x.dist2root for x in self.tree.get_terminals()])
# optionally reroot the tree either by oldest, best regression or with a specific leaf
if n_iqd or root=='clock_filter':
if "plot_rtt" in kwargs and kwargs["plot_rtt"]:
plot_rtt=True
else:
plot_rtt=False
reroot_mechanism = 'least-squares' if root=='clock_filter' else root
self.clock_filter(reroot=reroot_mechanism, n_iqd=n_iqd, plot=plot_rtt, fixed_clock_rate=fixed_clock_rate)
elif root is not None:
self.reroot(root=root, clock_rate=fixed_clock_rate)
if self.branch_length_mode=='input':
if self.aln:
self.infer_ancestral_sequences(**seq_kwargs)
else:
self.optimize_tree(max_iter=1, prune_short=False,**seq_kwargs)
# infer time tree and optionally resolve polytomies
self.logger("###TreeTime.run: INITIAL ROUND",0)
self.make_time_tree(**tt_kwargs)
if self.aln:
seq_LH = self.tree.sequence_marginal_LH if seq_kwargs['marginal_sequences'] else self.tree.sequence_joint_LH
self.LH =[[seq_LH, self.tree.positional_joint_LH, 0]]
if root is not None and max_iter:
new_root = self.reroot(root='least-squares' if root=='clock_filter' else root, clock_rate=fixed_clock_rate)
self.logger("###TreeTime.run: rerunning timetree after rerooting",0)
self.make_time_tree(**tt_kwargs)
# iteratively reconstruct ancestral sequences and re-infer
# time tree to ensure convergence.
niter = 0
ndiff = 0
need_new_time_tree=False
while niter < max_iter:
self.logger("###TreeTime.run: ITERATION %d out of %d iterations"%(niter+1,max_iter),0)
# add coalescent prior
if Tc:
if Tc=='skyline' and niter<max_iter-1:
tmpTc='const'
else:
tmpTc=Tc
self.add_coalescent_model(tmpTc, **kwargs)
need_new_time_tree = True
# estimate a relaxed molecular clock
if relaxed_clock:
print("relaxed_clock", relaxed_clock)
self.relaxed_clock(**relaxed_clock)
need_new_time_tree = True
n_resolved=0
if resolve_polytomies:
# if polytomies are found, rerun the entire procedure
n_resolved = self.resolve_polytomies()
if n_resolved:
self.prepare_tree()
if self.branch_length_mode!='input': # otherwise reoptimize branch length while preserving branches without mutations
self.optimize_tree(prune_short=False, max_iter=0, **seq_kwargs)
need_new_time_tree = True
if need_new_time_tree:
self.make_time_tree(**tt_kwargs)
if self.aln:
ndiff = self.infer_ancestral_sequences('ml',**seq_kwargs)
else: # no refinements, just iterate
if self.aln:
ndiff = self.infer_ancestral_sequences('ml',**seq_kwargs)
self.make_time_tree(**tt_kwargs)
self.tree.coalescent_joint_LH = self.merger_model.total_LH() if Tc else 0.0
if self.aln:
seq_LH = self.tree.sequence_marginal_LH if seq_kwargs['marginal_sequences'] else self.tree.sequence_joint_LH
self.LH.append([seq_LH, self.tree.positional_joint_LH, self.tree.coalescent_joint_LH])
niter+=1
if ndiff==0 and n_resolved==0 and Tc!='skyline':
self.logger("###TreeTime.run: CONVERGED",0)
break
# if the rate is too be varied and the rate estimate has a valid confidence interval
# rerun the estimation for variations of the rate
if vary_rate:
if type(vary_rate)==float:
self.calc_rate_susceptibility(rate_std=vary_rate, params=tt_kwargs)
elif self.clock_model['valid_confidence']:
self.calc_rate_susceptibility(params=tt_kwargs)
else:
raise UnknownMethodError("TreeTime.run: rate variation for confidence estimation is not available. Either specify it explicitly, or estimate from root-to-tip regression.")
# if marginal reconstruction requested, make one more round with marginal=True
# this will set marginal_pos_LH, which to be used as error bar estimations
if time_marginal:
self.logger("###TreeTime.run: FINAL ROUND - confidence estimation via marginal reconstruction", 0)
tt_kwargs['time_marginal']=time_marginal
self.make_time_tree(**tt_kwargs)
# explicitly print out which branches are bad and whose dates don't correspond to the input dates
bad_branches =[n for n in self.tree.get_terminals()
if n.bad_branch and n.raw_date_constraint]
if bad_branches:
self.logger("TreeTime: The following tips don't fit the clock model, "
"please remove them from the tree. Their dates have been reset:",0,warn=True)
for n in bad_branches:
self.logger("%s, input date: %s, apparent date: %1.2f"%(n.name, str(n.raw_date_constraint), n.numdate),0,warn=True)
return ttconf.SUCCESS
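    # Hedged usage sketch (file names and the exact constructor signature are
    # assumptions; see ClockTree/TreeAnc for the authoritative interface):
    #
    #   tt = TreeTime(gtr='JC69', tree='tree.nwk', aln='aln.fasta', dates=dates)
    #   tt.run(root='least-squares', infer_gtr=True, max_iter=2, time_marginal=True)
    #
    # where `dates` maps tip names to numeric sampling dates.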
def _set_branch_length_mode(self, branch_length_mode):
'''
if branch_length mode is not explicitly set, set according to
empirical branch length distribution in input tree
Parameters
----------
branch_length_mode : str, 'input', 'joint', 'marginal'
            if the maximal branch length in the tree is longer than 0.1, this will
default to 'input'. Otherwise set to 'joint'
'''
if branch_length_mode in ['joint', 'marginal', 'input']:
self.branch_length_mode = branch_length_mode
elif self.aln:
bl_dis = [n.branch_length for n in self.tree.find_clades() if n.up]
max_bl = np.max(bl_dis)
if max_bl>0.1:
bl_mode = 'input'
else:
bl_mode = 'joint'
self.logger("TreeTime._set_branch_length_mode: maximum branch length is %1.3e, using branch length mode %s"%(max_bl, bl_mode),1)
self.branch_length_mode = bl_mode
else:
self.branch_length_mode = 'input'
def clock_filter(self, reroot='least-squares', n_iqd=None, plot=False, fixed_clock_rate=None):
'''
Labels outlier branches that don't seem to follow a molecular clock
and excludes them from subsequent molecular clock estimation and
the timetree propagation.
Parameters
----------
reroot : str
Method to find the best root in the tree (see :py:meth:`treetime.TreeTime.reroot` for options)
        n_iqd : int
            Number of interquartile distances (IQD) used as the cutoff. Nodes whose
            residuals do not fall within :math:`n_{iqd}\cdot IQD` of the clock
            regression (:math:`IQD` is the interval between the 75th and 25th
            percentiles) are marked as outliers. If None, the default (3) is assumed.
plot : bool
If True, plot the results
'''
if n_iqd is None:
n_iqd = ttconf.NIQD
if type(reroot) is list and len(reroot)==1:
reroot=str(reroot[0])
terminals = self.tree.get_terminals()
if reroot:
self.reroot(root='least-squares' if reroot=='best' else reroot, covariation=False, clock_rate=fixed_clock_rate)
else:
self.get_clock_model(covariation=False, slope=fixed_clock_rate)
clock_rate = self.clock_model['slope']
icpt = self.clock_model['intercept']
res = {}
for node in terminals:
if hasattr(node, 'raw_date_constraint') and (node.raw_date_constraint is not None):
res[node] = node.dist2root - clock_rate*np.mean(node.raw_date_constraint) - icpt
residuals = np.array(list(res.values()))
        iqd = np.percentile(residuals, 75) - np.percentile(residuals, 25)
#!/usr/bin/env python3
"""
Python OpenGL practical application.
"""
# Python built-in modules
import os # os function, i.e. checking file status
from itertools import cycle
import sys
# External, non built-in modules
import OpenGL.GL as GL # standard Python OpenGL wrapper
import OpenGL.GLU as GLU # standard Python OpenGL wrapper
import glfw # lean window system wrapper for OpenGL
import numpy as np # all matrix manipulations & OpenGL args
import pyassimp # 3D resource loader
import pyassimp.errors # Assimp error management + exceptions
from PIL import Image # load images for textures
import yaml
from transform import *
from fisheye_utils import project_points_fisheye
# import logging
# logger = logging.getLogger("pyassimp")
# handler = logging.StreamHandler()
# logger.addHandler(handler)
# logger.setLevel(logging.DEBUG)
# ------------ simple color fragment shader demonstrated in Practical 1 ------
COLOR_VERT = """#version 330 core
uniform mat4 modelviewprojection;
uniform vec3 color;
layout(location = 0) in vec3 position;
out vec3 fragColor;
void main() {
gl_Position = modelviewprojection * vec4(position, 1);
fragColor = color;
}"""
FISHEYE_COLOR_VERT = """#version 330 core
uniform mat4 modelviewprojection;
uniform mat4 modelview;
uniform vec3 color;
uniform float fov;
layout(location = 0) in vec3 position;
out vec3 fragColor;
void main() {
gl_Position = modelviewprojection * vec4(position, 1);
vec4 tmp_point = modelview * vec4(position, 1);
float f = 1/(fov/2);
float r = length(tmp_point.xyz);
float theta = acos(tmp_point.z/r);
float R = f*theta;
gl_Position.xy = R * tmp_point.xy / length(tmp_point.xy);
fragColor = color;
}"""
COLOR_FRAG = """#version 330 core
in vec3 fragColor;
out vec4 outColor;
void main() {
outColor = vec4(fragColor, 1);
}"""
# -------------- Example texture plane class ----------------------------------
TEXTURE_VERT = """#version 330 core
uniform mat4 modelviewprojection;
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texposition;
out vec2 fragTexCoord;
void main() {
gl_Position = modelviewprojection * vec4(position, 1);
fragTexCoord = texposition;
}"""
FISHEYE_TEXTURE_VERT = """#version 330 core
uniform mat4 modelviewprojection;
uniform mat4 modelview;
uniform float fov;
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texposition;
out vec2 fragTexCoord;
void main() {
gl_Position = modelviewprojection * vec4(position, 1);
vec4 tmp_point = modelview * vec4(position, 1);
float r = length(tmp_point.xyz);
//if (r==0){
// gl_Position.x = 0;
// gl_Position.y = 0;
//}
//else
// gl_Position.xy = tmp_point.xy / (2*r);
float f = 1/(fov/2);
float theta;
if (r == 0)
theta = 0;
else
theta = acos(tmp_point.z/r);
float R = f*theta;
gl_Position.xy = R * tmp_point.xy / length(tmp_point.xy);
gl_Position.xy = gl_Position.xy / 2.0;
fragTexCoord = texposition;
}"""
TEXTURE_FRAG = """#version 330 core
uniform sampler2D diffuseMap;
uniform float alpha;
in vec2 fragTexCoord;
out vec4 outColor;
void main() {
outColor = texture(diffuseMap, fragTexCoord);
outColor.a = alpha;
}"""
# ------------ low level OpenGL object wrappers ----------------------------
class Shader:
""" Helper class to create and automatically destroy shader program """
@staticmethod
def _compile_shader(src, shader_type):
src = open(src, 'r').read() if os.path.exists(src) else src
src = src.decode('ascii') if isinstance(src, bytes) else src
shader = GL.glCreateShader(shader_type)
GL.glShaderSource(shader, src)
GL.glCompileShader(shader)
status = GL.glGetShaderiv(shader, GL.GL_COMPILE_STATUS)
src = ('%3d: %s' % (i+1, l) for i, l in enumerate(src.splitlines()))
if not status:
log = GL.glGetShaderInfoLog(shader).decode('ascii')
GL.glDeleteShader(shader)
src = '\n'.join(src)
print('Compile failed for %s\n%s\n%s' % (shader_type, log, src))
return None
return shader
def __init__(self, vertex_source, fragment_source):
""" Shader can be initialized with raw strings or source file names """
self.glid = None
vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)
frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)
if vert and frag:
self.glid = GL.glCreateProgram() # pylint: disable=E1111
GL.glAttachShader(self.glid, vert)
GL.glAttachShader(self.glid, frag)
GL.glLinkProgram(self.glid)
GL.glDeleteShader(vert)
GL.glDeleteShader(frag)
status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)
if not status:
print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))
GL.glDeleteProgram(self.glid)
self.glid = None
def __del__(self):
GL.glUseProgram(0)
if self.glid: # if this is a valid shader object
GL.glDeleteProgram(self.glid) # object dies => destroy GL object
class Texture:
""" Helper class to create and automatically destroy textures """
def __init__(self, file, wrap_mode=GL.GL_REPEAT, min_filter=GL.GL_LINEAR,
mag_filter=GL.GL_LINEAR_MIPMAP_LINEAR):
self.glid = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.glid)
# helper array stores texture format for every pixel size 1..4
format = [GL.GL_LUMINANCE, GL.GL_LUMINANCE_ALPHA, GL.GL_RGB, GL.GL_RGBA]
try:
# imports image as a numpy array in exactly right format
tex = np.array(Image.open(file))
format = format[0 if len(tex.shape) == 2 else tex.shape[2] - 1]
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, tex.shape[1],
tex.shape[0], 0, format, GL.GL_UNSIGNED_BYTE, tex)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, wrap_mode)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, wrap_mode)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, min_filter)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, mag_filter)
GL.glGenerateMipmap(GL.GL_TEXTURE_2D)
# message = 'Loaded texture %s\t(%s, %s, %s, %s)'
# print(message % (file, tex.shape, wrap_mode, min_filter, mag_filter))
except IOError as e:
print(os.strerror(e.errno))
# except FileNotFoundError:
# print("ERROR: unable to load texture file %s" % file)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
def __del__(self): # delete GL texture from GPU when object dies
GL.glDeleteTextures(self.glid)
class Background:
def __init__(self, file):
self.shader = Shader(TEXTURE_VERT, TEXTURE_FRAG)
# quad to fill whole window
#vertices = np.array(((-1, -1, -1), (1, -1, -1), (1, 1, -1), (-1, 1, -1)), np.float32)
vertices = np.array(((-1, -1, 0), (1, -1, 0), (1, 1, 0), (-1, 1, 0)), np.float32)
texCoords = np.array(((0, 1), (1, 1), (1, 0), (0, 0)), np.float32)
faces = np.array(((0, 1, 2), (0, 2, 3)), np.uint32)
self.vertex_array = VertexArray([vertices, texCoords], faces)
# background image as texture
self.wrap_mode = GL.GL_CLAMP_TO_EDGE
self.filter_mode = (GL.GL_NEAREST, GL.GL_NEAREST)
# setup texture and upload it to GPU
self.texture = Texture(file, self.wrap_mode, *self.filter_mode)
def set(self, file):
self.texture = Texture(file, self.wrap_mode, *self.filter_mode)
def draw(self):
""" Draw background image using a quad. """
GL.glUseProgram(self.shader.glid)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
# projection geometry
loc = GL.glGetUniformLocation(self.shader.glid, 'modelviewprojection')
GL.glUniformMatrix4fv(loc, 1, True, np.eye(4))
# texture access setups
loc = GL.glGetUniformLocation(self.shader.glid, 'diffuseMap')
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture.glid)
GL.glUniform1i(loc, 0)
self.vertex_array.execute(GL.GL_TRIANGLES)
# clear depth for background
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
# leave clean state for easier debugging
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glUseProgram(0)
class VertexArray:
""" helper class to create and self destroy OpenGL vertex array objects."""
def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):
""" Vertex array from attributes and optional index array. Vertex
Attributes should be list of arrays with one row per vertex. """
# create vertex array object, bind it
self.glid = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self.glid)
self.buffers = [] # we will store buffers in a list
nb_primitives, size = 0, 0
# load buffer per vertex attribute (in list with index = shader layout)
for loc, data in enumerate(attributes):
if data is not None:
# bind a new vbo, upload its data to GPU, declare size and type
self.buffers += [GL.glGenBuffers(1)]
data = np.array(data, np.float32, copy=False) # ensure format
nb_primitives, size = data.shape
GL.glEnableVertexAttribArray(loc)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])
GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)
GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)
# optionally create and upload an index buffer for this object
self.draw_command = GL.glDrawArrays
self.arguments = (0, nb_primitives)
if index is not None:
self.buffers += [GL.glGenBuffers(1)]
index_buffer = np.array(index, np.int32, copy=False) # good format
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])
GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)
self.draw_command = GL.glDrawElements
self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)
# cleanup and unbind so no accidental subsequent state update
GL.glBindVertexArray(0)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)
def execute(self, primitive):
""" draw a vertex array, either as direct array or indexed array """
GL.glBindVertexArray(self.glid)
self.draw_command(primitive, *self.arguments)
GL.glBindVertexArray(0)
def __del__(self): # object dies => kill GL array and buffers from GPU
GL.glDeleteVertexArrays(1, [self.glid])
GL.glDeleteBuffers(len(self.buffers), self.buffers)
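# Minimal usage sketch (assumes a current GL context and a suitable shader program are
# already bound):
#
#   verts = np.array(((0, .5, 0), (.5, -.5, 0), (-.5, -.5, 0)), np.float32)
#   va = VertexArray([verts])
#   va.execute(GL.GL_TRIANGLES)  # draws one triangle from a single attribute buffer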
# ------------ Scene object classes ------------------------------------------
class Node(object):
""" Scene graph transform and parameter broadcast node """
def __init__(self, name='', children=(), transform=identity(), **param):
self.transform, self.param, self.name = transform, param, name
self.children = list(iter(children))
def add(self, *drawables):
""" Add drawables to this node, simply updating children list """
self.children.extend(drawables)
def draw(self, projection, view, model, **param):
""" Recursive draw, passing down named parameters & model matrix. """
# merge named parameters given at initialization with those given here
param = dict(param, **self.param)
        model = np.dot(model, self.transform)  # compose parent model matrix with this node's local transform
for child in self.children:
child.draw(projection, view, model, **param)
class BoundingBox:
def __init__(self, coords, width=0.01, color=[1,0,0]):
self.shader = Shader(COLOR_VERT, COLOR_FRAG)
self.color = np.array(color)
minx, maxx, miny, maxy = coords # normalized coordinates as [minx, maxx, miny, maxy]
vertices = np.array(((minx, maxy+width, 0), (minx-width, maxy+width, 0), (minx-width, maxy, 0),
(minx-width, miny, 0), (minx-width, miny-width, 0), (minx, miny-width, 0),
(maxx, miny-width, 0), (maxx+width, miny-width, 0), (maxx+width, miny, 0),
(maxx+width, maxy, 0), (maxx+width, maxy+width, 0), (maxx, maxy+width, 0)), np.float32)
# vertices = np.clip(vertices, -1, 1)
faces = np.array(((0, 1, 4), (0, 4, 5),
(8, 3, 4), (8, 4, 7),
(10, 11, 6), (10, 6, 7),
(10, 1, 2), (10, 2, 9)), np.uint32)
self.vertex_array = VertexArray([vertices], faces)
# background image as texture
self.wrap_mode = GL.GL_CLAMP_TO_EDGE
self.filter_mode = (GL.GL_NEAREST, GL.GL_NEAREST)
def draw(self):
""" Draw background image using a quad. """
GL.glUseProgram(self.shader.glid)
# projection geometry
loc = GL.glGetUniformLocation(self.shader.glid, 'modelviewprojection')
GL.glUniformMatrix4fv(loc, 1, True, np.eye(4))
loc = GL.glGetUniformLocation(self.shader.glid, 'color')
GL.glUniform3fv(loc, 1, self.color)
self.vertex_array.execute(GL.GL_TRIANGLES)
# leave clean state for easier debugging
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glUseProgram(0)
class Marker:
def __init__(self, coords, color=[1,0,0]):
self.shader = Shader(COLOR_VERT, COLOR_FRAG)
self.color = np.array(color)
minx, maxx, miny, maxy = coords # normalized coordinates as [minx, maxx, miny, maxy]
vertices = np.array(((minx, maxy, 0),
(minx, miny, 0),
(maxx, miny, 0),
(maxx, maxy, 0)), np.float32)
# vertices = np.clip(vertices, -1, 1)
faces = np.array(((0, 1, 2), (3, 0, 2)), np.uint32)
self.vertex_array = VertexArray([vertices], faces)
# background image as texture
self.wrap_mode = GL.GL_CLAMP_TO_EDGE
self.filter_mode = (GL.GL_NEAREST, GL.GL_NEAREST)
def draw(self):
""" Draw background image using a quad. """
GL.glUseProgram(self.shader.glid)
# projection geometry
loc = GL.glGetUniformLocation(self.shader.glid, 'modelviewprojection')
GL.glUniformMatrix4fv(loc, 1, True, np.eye(4))
loc = GL.glGetUniformLocation(self.shader.glid, 'color')
GL.glUniform3fv(loc, 1, self.color)
self.vertex_array.execute(GL.GL_TRIANGLES)
# leave clean state for easier debugging
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glUseProgram(0)
class TexturedPlane:
""" Simple first textured object """
def __init__(self, file):
# feel free to move this up in the viewer as per other practicals
self.shader = Shader(TEXTURE_VERT, TEXTURE_FRAG)
# triangle and face buffers
vertices = 100 * np.array(((-1, -1, 0), (1, -1, 0), (1, 1, 0), (-1, 1, 0)), np.float32)
faces = np.array(((0, 1, 2), (0, 2, 3)), np.uint32)
self.vertex_array = VertexArray([vertices, vertices[:,:2]], faces)
# interactive toggles
self.wrap = cycle([GL.GL_REPEAT, GL.GL_MIRRORED_REPEAT,
GL.GL_CLAMP_TO_BORDER, GL.GL_CLAMP_TO_EDGE])
self.filter = cycle([(GL.GL_NEAREST, GL.GL_NEAREST),
(GL.GL_LINEAR, GL.GL_LINEAR),
(GL.GL_LINEAR, GL.GL_LINEAR_MIPMAP_LINEAR)])
self.wrap_mode, self.filter_mode = next(self.wrap), next(self.filter)
self.file = file
# setup texture and upload it to GPU
self.texture = Texture(file, self.wrap_mode, *self.filter_mode)
def draw(self, projection, view, model, win=None, **_kwargs):
# some interactive elements
if glfw.get_key(win, glfw.KEY_F6) == glfw.PRESS:
self.wrap_mode = next(self.wrap)
self.texture = Texture(self.file, self.wrap_mode, *self.filter_mode)
if glfw.get_key(win, glfw.KEY_F7) == glfw.PRESS:
self.filter_mode = next(self.filter)
self.texture = Texture(self.file, self.wrap_mode, *self.filter_mode)
GL.glUseProgram(self.shader.glid)
# projection geometry
loc = GL.glGetUniformLocation(self.shader.glid, 'modelviewprojection')
GL.glUniformMatrix4fv(loc, 1, True, np.dot(np.dot(projection,view), model))
# texture access setups
loc = GL.glGetUniformLocation(self.shader.glid, 'diffuseMap')
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture.glid)
GL.glUniform1i(loc, 0)
self.vertex_array.execute(GL.GL_TRIANGLES)
# leave clean state for easier debugging
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glUseProgram(0)
class TexturedMesh:
""" Textured object class """
def __init__(self, obj_name, mesh, texture, attributes, indices):
self.obj_name = obj_name # name associated with meshes of object instance
self.mesh = mesh
# feel free to move this up in the viewer as per other practicals
#self.shader = Shader(TEXTURE_VERT, TEXTURE_FRAG)
self.model_matrix = np.eye(4)
# self.model_matrix[2,3] = -1
# triangle and face buffers
self.vertex_array = VertexArray(attributes, indices)
# interactive toggles
self.wrap = cycle([GL.GL_REPEAT, GL.GL_MIRRORED_REPEAT,
GL.GL_CLAMP_TO_BORDER, GL.GL_CLAMP_TO_EDGE])
self.filter = cycle([(GL.GL_NEAREST, GL.GL_NEAREST),
(GL.GL_LINEAR, GL.GL_LINEAR),
(GL.GL_LINEAR, GL.GL_LINEAR_MIPMAP_LINEAR)])
self.wrap_mode, self.filter_mode = next(self.wrap), next(self.filter)
self.file = texture
# object transparancy
self.alpha = 1.0
# setup texture and upload it to GPU
self.texture = Texture(self.file, self.wrap_mode, *self.filter_mode)
def draw(self, shader, projection, view, model=None, is_fisheye=False, fish_fov=np.pi, win=None, **_kwargs):
if model is None:
model = self.model_matrix
else:
self.model_matrix = model
# some interactive elements
if glfw.get_key(win, glfw.KEY_F6) == glfw.PRESS:
self.wrap_mode = next(self.wrap)
self.texture = Texture(self.file, self.wrap_mode, *self.filter_mode)
if glfw.get_key(win, glfw.KEY_F7) == glfw.PRESS:
self.filter_mode = next(self.filter)
self.texture = Texture(self.file, self.wrap_mode, *self.filter_mode)
GL.glUseProgram(shader.glid)
# projection geometry
loc = GL.glGetUniformLocation(shader.glid, 'modelviewprojection')
GL.glUniformMatrix4fv(loc, 1, True, np.dot(np.dot(projection,view),model))
if is_fisheye:
loc = GL.glGetUniformLocation(shader.glid, 'modelview')
GL.glUniformMatrix4fv(loc, 1, True, mat_from_gl(np.dot(view,model)))
loc = GL.glGetUniformLocation(shader.glid, 'fov')
GL.glUniform1f(loc, fish_fov)
# texture access setups
#GL.glTexEnvf(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, GL.GL_MODULATE)
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA)
# object transparancy
loc = GL.glGetUniformLocation(shader.glid, 'alpha')
GL.glUniform1f(loc, self.alpha)
#GL.glColor4f(1.0, 1.0, 1.0, 0.5)
loc = GL.glGetUniformLocation(shader.glid, 'diffuseMap')
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture.glid)
GL.glUniform1i(loc, 0)
self.vertex_array.execute(GL.GL_TRIANGLES)
# leave clean state for easier debugging
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glUseProgram(0)
GL.glDisable(GL.GL_BLEND)
# mesh to refactor all previous classes
class ColorMesh(object):
def __init__(self, attributes, index=None):
self.vertex_array = VertexArray(attributes, index)
def draw(self, projection, view, model, color_shader, **param):
names = ['view', 'projection', 'model']
loc = {n: GL.glGetUniformLocation(color_shader.glid, n) for n in names}
GL.glUseProgram(color_shader.glid)
GL.glUniformMatrix4fv(loc['view'], 1, True, view)
GL.glUniformMatrix4fv(loc['projection'], 1, True, projection)
GL.glUniformMatrix4fv(loc['model'], 1, True, model)
# draw triangle as GL_TRIANGLE vertex array, draw array call
self.vertex_array.execute(GL.GL_TRIANGLES)
class SimpleTriangle(ColorMesh):
"""Hello triangle object"""
def __init__(self):
# triangle position buffer
        position = np.array(((0, .5, 0), (.5, -.5, 0), (-.5, -.5, 0)), 'f')
import os
from os.path import join
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
assert_raises)
import pytest
from numpy.random import (Generator, MT19937, ThreeFry, PCG32, PCG64,
Philox, Xoshiro256, Xoshiro512, RandomState)
from numpy.random.common import interface
try:
import cffi # noqa: F401
MISSING_CFFI = False
except ImportError:
MISSING_CFFI = True
try:
import ctypes # noqa: F401
MISSING_CTYPES = False
except ImportError:
    MISSING_CTYPES = True
pwd = os.path.dirname(os.path.abspath(__file__))
def assert_state_equal(actual, target):
for key in actual:
if isinstance(actual[key], dict):
assert_state_equal(actual[key], target[key])
elif isinstance(actual[key], np.ndarray):
assert_array_equal(actual[key], target[key])
else:
assert actual[key] == target[key]
def uniform32_from_uint64(x):
x = np.uint64(x)
upper = np.array(x >> np.uint64(32), dtype=np.uint32)
lower = np.uint64(0xffffffff)
lower = np.array(x & lower, dtype=np.uint32)
joined = np.column_stack([lower, upper]).ravel()
out = (joined >> np.uint32(9)) * (1.0 / 2 ** 23)
return out.astype(np.float32)
def uniform32_from_uint53(x):
x = np.uint64(x) >> np.uint64(16)
x = np.uint32(x & np.uint64(0xffffffff))
out = (x >> np.uint32(9)) * (1.0 / 2 ** 23)
return out.astype(np.float32)
def uniform32_from_uint32(x):
return (x >> np.uint32(9)) * (1.0 / 2 ** 23)
def uniform32_from_uint(x, bits):
if bits == 64:
return uniform32_from_uint64(x)
elif bits == 53:
return uniform32_from_uint53(x)
elif bits == 32:
return uniform32_from_uint32(x)
else:
raise NotImplementedError
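# The float32 helpers above reduce each 32-bit output word to its top 23 bits
# ((x >> 9) / 2**23), the size of a float32 mantissa; the float64 helpers below keep
# 53 bits ((x >> 11) / 2**53, i.e. division by 9007199254740992.0).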
def uniform_from_uint(x, bits):
if bits in (64, 63, 53):
return uniform_from_uint64(x)
elif bits == 32:
return uniform_from_uint32(x)
def uniform_from_uint64(x):
return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
def uniform_from_uint32(x):
out = np.empty(len(x) // 2)
for i in range(0, len(x), 2):
a = x[i] >> 5
b = x[i + 1] >> 6
out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
return out
def uniform_from_dsfmt(x):
return x.view(np.double) - 1.0
def gauss_from_uint(x, n, bits):
if bits in (64, 63):
doubles = uniform_from_uint64(x)
elif bits == 32:
doubles = uniform_from_uint32(x)
else: # bits == 'dsfmt'
doubles = uniform_from_dsfmt(x)
gauss = []
loc = 0
x1 = x2 = 0.0
while len(gauss) < n:
r2 = 2
while r2 >= 1.0 or r2 == 0.0:
x1 = 2.0 * doubles[loc] - 1.0
x2 = 2.0 * doubles[loc + 1] - 1.0
r2 = x1 * x1 + x2 * x2
loc += 2
f = np.sqrt(-2.0 * np.log(r2) / r2)
gauss.append(f * x2)
gauss.append(f * x1)
return gauss[:n]
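# gauss_from_uint mirrors the polar (Marsaglia) rejection sampling used by the legacy
# RandomState normal generator, so standard_normal output can be checked against the
# recorded raw draws.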
class Base(object):
dtype = np.uint64
data2 = data1 = {}
@classmethod
def setup_class(cls):
cls.bit_generator = Xoshiro256
cls.bits = 64
cls.dtype = np.uint64
cls.seed_error_type = TypeError
cls.invalid_seed_types = []
cls.invalid_seed_values = []
@classmethod
def _read_csv(cls, filename):
with open(filename) as csv:
seed = csv.readline()
seed = seed.split(',')
seed = [int(s.strip(), 0) for s in seed[1:]]
data = []
for line in csv:
data.append(int(line.split(',')[-1].strip(), 0))
return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
def test_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data1['data'])
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw()
assert_equal(uints, self.data1['data'][0])
bit_generator = self.bit_generator(*self.data2['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data2['data'])
def test_random_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(output=False)
assert uints is None
uints = bit_generator.random_raw(1000, output=False)
assert uints is None
def test_gauss_inv(self):
n = 25
rs = RandomState(self.bit_generator(*self.data1['seed']))
gauss = rs.standard_normal(n)
assert_allclose(gauss,
gauss_from_uint(self.data1['data'], n, self.bits))
rs = RandomState(self.bit_generator(*self.data2['seed']))
gauss = rs.standard_normal(25)
assert_allclose(gauss,
gauss_from_uint(self.data2['data'], n, self.bits))
def test_uniform_double(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
def test_uniform_float(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform32_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform32_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
def test_seed_float(self):
# GH #82
rs = Generator(self.bit_generator(*self.data1['seed']))
assert_raises(self.seed_error_type, rs.bit_generator.seed, np.pi)
assert_raises(self.seed_error_type, rs.bit_generator.seed, -np.pi)
def test_seed_float_array(self):
# GH #82
rs = Generator(self.bit_generator(*self.data1['seed']))
assert_raises(self.seed_error_type, rs.bit_generator.seed,
np.array([np.pi]))
assert_raises(self.seed_error_type, rs.bit_generator.seed,
np.array([-np.pi]))
assert_raises(ValueError, rs.bit_generator.seed,
np.array([np.pi, -np.pi]))
assert_raises(TypeError, rs.bit_generator.seed, np.array([0, np.pi]))
assert_raises(TypeError, rs.bit_generator.seed, [np.pi])
assert_raises(TypeError, rs.bit_generator.seed, [0, np.pi])
def test_seed_out_of_range(self):
# GH #82
rs = Generator(self.bit_generator(*self.data1['seed']))
assert_raises(ValueError, rs.bit_generator.seed,
2 ** (2 * self.bits + 1))
assert_raises(ValueError, rs.bit_generator.seed, -1)
def test_seed_out_of_range_array(self):
# GH #82
rs = Generator(self.bit_generator(*self.data1['seed']))
assert_raises(ValueError, rs.bit_generator.seed,
[2 ** (2 * self.bits + 1)])
assert_raises(ValueError, rs.bit_generator.seed, [-1])
def test_repr(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in repr(rs)
assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') in repr(rs)
def test_str(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in str(rs)
assert str(self.bit_generator.__name__) in str(rs)
assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') not in str(rs)
def test_pickle(self):
import pickle
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
bitgen_pkl = pickle.dumps(bit_generator)
reloaded = pickle.loads(bitgen_pkl)
reloaded_state = reloaded.state
assert_array_equal(Generator(bit_generator).standard_normal(1000),
Generator(reloaded).standard_normal(1000))
assert bit_generator is not reloaded
assert_state_equal(reloaded_state, state)
def test_invalid_state_type(self):
bit_generator = self.bit_generator(*self.data1['seed'])
with pytest.raises(TypeError):
bit_generator.state = {'1'}
def test_invalid_state_value(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
state['bit_generator'] = 'otherBitGenerator'
with pytest.raises(ValueError):
bit_generator.state = state
def test_invalid_seed_type(self):
bit_generator = self.bit_generator(*self.data1['seed'])
for st in self.invalid_seed_types:
with pytest.raises(TypeError):
bit_generator.seed(*st)
def test_invalid_seed_values(self):
bit_generator = self.bit_generator(*self.data1['seed'])
for st in self.invalid_seed_values:
with pytest.raises(ValueError):
bit_generator.seed(*st)
def test_benchmark(self):
bit_generator = self.bit_generator(*self.data1['seed'])
bit_generator._benchmark(1)
bit_generator._benchmark(1, 'double')
with pytest.raises(ValueError):
bit_generator._benchmark(1, 'int32')
@pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
def test_cffi(self):
bit_generator = self.bit_generator(*self.data1['seed'])
cffi_interface = bit_generator.cffi
assert isinstance(cffi_interface, interface)
other_cffi_interface = bit_generator.cffi
assert other_cffi_interface is cffi_interface
@pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
def test_ctypes(self):
bit_generator = self.bit_generator(*self.data1['seed'])
ctypes_interface = bit_generator.ctypes
assert isinstance(ctypes_interface, interface)
other_ctypes_interface = bit_generator.ctypes
assert other_ctypes_interface is ctypes_interface
def test_getstate(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
alt_state = bit_generator.__getstate__()
assert_state_equal(state, alt_state)
class TestXoshiro256(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = Xoshiro256
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/xoshiro256-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/xoshiro256-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_seed_types = [('apple',), (2 + 3j,), (3.1,)]
cls.invalid_seed_values = [(-2,), (np.empty((2, 2), dtype=np.int64),)]
class TestXoshiro512(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = Xoshiro512
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/xoshiro512-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/xoshiro512-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_seed_types = [('apple',), (2 + 3j,), (3.1,)]
cls.invalid_seed_values = [(-2,), (np.empty((2, 2), dtype=np.int64),)]
class TestThreeFry(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = ThreeFry
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/threefry-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/threefry-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_seed_types = []
cls.invalid_seed_values = [(1, None, 1), (-1,), (2 ** 257 + 1,),
(None, None, 2 ** 257 + 1)]
def test_set_key(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
keyed = self.bit_generator(counter=state['state']['counter'],
key=state['state']['key'])
assert_state_equal(bit_generator.state, keyed.state)
class TestPhilox(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/philox-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/philox-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_seed_types = []
cls.invalid_seed_values = [(1, None, 1), (-1,), (2 ** 257 + 1,),
(None, None, 2 ** 257 + 1)]
def test_set_key(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
keyed = self.bit_generator(counter=state['state']['counter'],
key=state['state']['key'])
assert_state_equal(bit_generator.state, keyed.state)
class TestPCG64(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_seed_types = [(np.array([1, 2]),), (3.2,),
(None, np.zeros(1))]
cls.invalid_seed_values = [(-1,), (2 ** 129 + 1,), (None, -1),
(None, 2 ** 129 + 1)]
def test_seed_float_array(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert_raises(self.seed_error_type, rs.bit_generator.seed,
np.array([np.pi]))
assert_raises(self.seed_error_type, rs.bit_generator.seed,
np.array([-np.pi]))
assert_raises(self.seed_error_type, rs.bit_generator.seed,
np.array([np.pi, -np.pi]))
assert_raises(self.seed_error_type, rs.bit_generator.seed,
np.array([0, np.pi]))
assert_raises(self.seed_error_type, rs.bit_generator.seed, [np.pi])
assert_raises(self.seed_error_type, rs.bit_generator.seed, [0, np.pi])
def test_seed_out_of_range_array(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert_raises(self.seed_error_type, rs.bit_generator.seed,
[2 ** (2 * self.bits + 1)])
assert_raises(self.seed_error_type, rs.bit_generator.seed, [-1])
def test_advance_symmetry(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
state = rs.bit_generator.state
step = -0x9e3779b97f4a7c150000000000000000
rs.bit_generator.advance(step)
val_neg = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(2**128 + step)
val_pos = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(10 * 2**128 + step)
val_big = rs.integers(10)
assert val_neg == val_pos
assert val_big == val_pos
class TestPCG32(TestPCG64):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG32
cls.bits = 32
cls.dtype = np.uint32
cls.data1 = cls._read_csv(join(pwd, './data/pcg32-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg32-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_seed_types = [(np.array([1, 2]),), (3.2,),
(None, np.zeros(1))]
cls.invalid_seed_values = [(-1,), (2 ** 129 + 1,), (None, -1),
(None, 2 ** 129 + 1)]
class TestMT19937(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.bits = 32
cls.dtype = np.uint32
cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
cls.seed_error_type = ValueError
cls.invalid_seed_types = []
cls.invalid_seed_values = [(-1,), np.array([2 ** 33])]
def test_seed_out_of_range(self):
# GH #82
rs = Generator(self.bit_generator(*self.data1['seed']))
assert_raises(ValueError, rs.bit_generator.seed, 2 ** (self.bits + 1))
assert_raises(ValueError, rs.bit_generator.seed, -1)
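# --- Illustrative aside (not part of the test suite above) ---
# The tests above exercise seeding and state handling of the extension bit
# generators. As a minimal sketch of the same state round-trip idea, using
# NumPy's bundled PCG64 rather than the classes under test:
import numpy as np
bg = np.random.PCG64(12345)
gen = np.random.Generator(bg)
saved_state = bg.state            # snapshot the bit generator state
first = gen.standard_normal(5)    # drawing advances the state
bg.state = saved_state            # restore the snapshot
again = gen.standard_normal(5)    # replays exactly the same draws
assert np.allclose(first, again)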
import configparser
import os
import sys
import netCDF4 as nc
import numpy as np
from numpy import linalg as LA
import nrrd
from scipy.interpolate import RegularGridInterpolator
from readMRIData import get_rotation_matrix
from readMRIData import plot_lin_plane_fitting_with_bbox
from readMRIData import read_intra_op_points
from readMRIData import rotate_point_by_rotation_matrix
def read_nrrd_file(filepath):
data, header = nrrd.read(filepath)
return data, header
def save_as_netcdf(data, filename):
print('Save data to {}.'.format(filename))
nc_file = nc.Dataset(filename, 'w', format='NETCDF3_CLASSIC')
nc_file.createDimension('nNodes_0', data.shape[0])
nc_file.createDimension('nNodes_1', data.shape[1])
nc_file.createDimension('nNodes_2', data.shape[2])
nc_file.createDimension('time')
brain = nc_file.createVariable('region', 'i2', ('time', 'nNodes_2',
'nNodes_1', 'nNodes_0'))
brain[0,] = np.swapaxes(data, 0, 2)
nc_file.close()
print('Done.')
def data_as_binary_data(data):
binary_data = np.where(data > 1.0, 1, 0)
return binary_data
def get_ijk_to_lps(header):
space_origin = return_string_list_as_float_numpy_array(header['space origin'])
space_origin = np.append(space_origin, 1)
row0 = return_string_list_as_float_numpy_array(header['space directions'][0])
row0 = np.append(row0, 0)
row1 = return_string_list_as_float_numpy_array(header['space directions'][1])
row1 = np.append(row1, 0)
row2 = return_string_list_as_float_numpy_array(header['space directions'][2])
row2 = np.append(row2, 0)
ijk_to_lps = np.array([row0, row1, row2, space_origin]).T
return ijk_to_lps
def ijk_to_lps(ijk, header):
if len(ijk) == 3:
ijk = np.append(ijk, 1)
ijk_to_lps = get_ijk_to_lps(header)
lps = np.dot(ijk_to_lps, ijk)
return lps[0:3]
def ijk_to_ras(ijk, header):
if len(ijk) == 3:
ijk = np.append(ijk, 1)
ijk_to_lps = get_ijk_to_lps(header)
lps_to_ras = np.diag([-1, -1, 1, 1])
lps = np.dot(ijk_to_lps, ijk)
ras = np.matmul(lps, lps_to_ras)
return ras[0:3]
def lps_to_ijk(lps, header):
if len(lps) == 3:
lps = np.append(lps, 1)
ijk_to_lps = get_ijk_to_lps(header)
lps_to_ijk = LA.inv(ijk_to_lps)
ijk = np.dot(lps_to_ijk, lps)
return ijk[0:3]
def ras_to_ijk(ras, header):
if len(ras) == 3:
ras = np.append(ras, 1)
ijk_to_lps = get_ijk_to_lps(header)
lps_to_ijk = LA.inv(ijk_to_lps)
ras_to_lps = np.diag([-1, -1, 1, 1])
# remaining steps mirror ijk_to_ras/lps_to_ijk above (reconstructed)
lps = np.matmul(ras_to_lps, ras)
ijk = np.dot(lps_to_ijk, lps)
return ijk[0:3]
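# --- Illustrative aside (assumed usage; 'segmentation.nrrd' and the voxel
# index below are placeholders, not taken from the original script) ---
# The helpers above chain a 4x4 homogeneous matrix (voxel index -> LPS) with
# the fixed LPS -> RAS sign flip. A typical round trip for one voxel:
data, header = read_nrrd_file('segmentation.nrrd')
voxel = np.array([10, 20, 5])
lps = ijk_to_lps(voxel, header)     # scanner LPS coordinates (mm)
ras = ijk_to_ras(voxel, header)     # same point in RAS convention
back = lps_to_ijk(lps, header)      # should recover the voxel index
print(lps, ras, np.round(back))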
#!/usr/bin/env python
# coding: utf-8
# Let's load the package
import numpy as np
from numba import cuda
# let's prepare the data
np.random.seed(42)
start = np.random.randint(1,10,(1024,1024))
symmetry = np.tril(start, k=0) + np.tril(start, k=-1)
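# --- Illustrative aside (our own example kernel; the original notebook is
# truncated here, so this is an assumption about how the prepared array
# might be used on the GPU) ---
# A minimal numba CUDA kernel that adds the matrix to its transpose
# element-wise (the result is symmetric):
@cuda.jit
def add_transpose(mat, out):
    i, j = cuda.grid(2)                         # global thread indices
    if i < mat.shape[0] and j < mat.shape[1]:
        out[i, j] = mat[i, j] + mat[j, i]

out = np.zeros_like(symmetry)
threads_per_block = (16, 16)
blocks = ((symmetry.shape[0] + 15) // 16, (symmetry.shape[1] + 15) // 16)
add_transpose[blocks, threads_per_block](symmetry, out)  # numba handles host/device copies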
import argparse
import cv2
import mxnet as mx
import numpy as np
from utils import face_preprocess
from utils.mtcnn_detector import MtcnnDetector
parser = argparse.ArgumentParser()
parser.add_argument('--image_size', default='112,112', help='models input size.')
parser.add_argument('--image', default='test.jpg', help='infer image path.')
parser.add_argument('--model', default='model/model,200', help='path to load model.')
parser.add_argument('--mtcnn_model', default='mtcnn-model', help='path to load model.')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
args = parser.parse_args()
class FaceAgeGenderModel:
def __init__(self, args):
self.args = args
if args.gpu >= 0:
ctx = mx.gpu(args.gpu)
else:
ctx = mx.cpu()
_vec = args.image_size.split(',')
assert len(_vec) == 2
image_size = (int(_vec[0]), int(_vec[1]))
self.model = None
if len(args.model) > 0:
self.model = self.get_model(ctx, image_size, args.model, 'fc1')
self.det_minsize = 50
self.det_threshold = [0.6, 0.7, 0.8]
self.image_size = image_size
detector = MtcnnDetector(model_folder=args.mtcnn_model, ctx=ctx, num_worker=1, accurate_landmark=True,
threshold=self.det_threshold)
print("加载模型:%s" % args.mtcnn_model)
self.detector = detector
# Load the model
def get_model(self, ctx, image_size, model_str, layer):
_vec = model_str.split(',')
assert len(_vec) == 2
prefix = _vec[0]
epoch = int(_vec[1])
print('loading', prefix, epoch)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
all_layers = sym.get_internals()
sym = all_layers[layer + '_output']
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
model.set_params(arg_params, aux_params)
return model
# Detect faces
def get_faces(self, face_img):
ret = self.detector.detect_face(face_img)
if ret is None:
return None
bbox, points = ret
if bbox.shape[0] == 0:
return [], [], []
bboxes = []
pointses = []
faces = []
for i in range(len(bbox)):
b = bbox[i, 0:4]
bboxes.append(b)
p = points[i, :].reshape((2, 5)).T
pointses.append(p)
nimg = face_preprocess.preprocess(face_img, b, p, image_size='112,112')
nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
aligned = np.transpose(nimg, (2, 0, 1))
input_blob = np.expand_dims(aligned, axis=0)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data,))
faces.append(db)
return faces, bboxes, pointses
# Gender and age recognition
def get_ga(self, data):
self.model.forward(data, is_train=False)
ret = self.model.get_outputs()[0].asnumpy()
g = ret[:, 0:2].flatten()
gender = np.argmax(g)
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model-Test Coverage Metrics.
"""
from abc import abstractmethod
from collections import defaultdict
import math
import numpy as np
from mindspore import Tensor
from mindspore import Model
from mindspore.train.summary.summary_record import _get_summary_tensor_data
from mindarmour.utils._check_param import check_model, check_numpy_param, check_int_positive, \
check_param_type, check_value_positive
from mindarmour.utils.logger import LogUtil
LOGGER = LogUtil.get_instance()
TAG = 'CoverageMetrics'
class CoverageMetrics:
"""
The abstract base class for Neuron coverage classes calculating coverage metrics.
As is well known, each neuron output of a network has an output range after training (we call it the original
range), and a test dataset is used to estimate the accuracy of the trained network. However, neurons' output
distributions differ between test datasets. Therefore, similar to function fuzzing, model fuzzing means
testing those neurons' outputs and estimating the proportion of the original range that has been covered by test
datasets.
Reference: `DeepGauge: Multi-Granularity Testing Criteria for Deep Learning Systems
<https://arxiv.org/abs/1803.07519>`_
Args:
model (Model): The pre-trained model which waiting for testing.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, incremental=False, batch_size=32):
self._model = check_model('model', model, Model)
self.incremental = check_param_type('incremental', incremental, bool)
self.batch_size = check_int_positive('batch_size', batch_size)
self._activate_table = defaultdict(list)
@abstractmethod
def get_metrics(self, dataset):
"""
Calculate coverage metrics of given dataset.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Raises:
NotImplementedError: It is an abstract method.
"""
msg = 'The function get_metrics() is an abstract method in class `CoverageMetrics`, and should be' \
' implemented in child class.'
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
def _init_neuron_activate_table(self, data):
"""
Initialise the activate table of each neuron in the model with format:
{'layer1': [n1, n2, n3, ..., nn], 'layer2': [n1, n2, n3, ..., nn], ...}
Args:
data (numpy.ndarray): Data used for initialising the activate table.
Return:
dict, return a activate_table.
"""
self._model.predict(Tensor(data))
layer_out = _get_summary_tensor_data()
if not layer_out:
msg = 'User must use TensorSummary() operation to specify the middle layer of the model participating in ' \
'the coverage calculation.'
LOGGER.error(TAG, msg)
raise ValueError(msg)
activate_table = defaultdict()
for layer, value in layer_out.items():
activate_table[layer] = np.zeros(value.shape[1], bool)
return activate_table
def _get_bounds(self, train_dataset):
"""
Update the lower and upper boundaries of neurons' outputs.
Args:
train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
Return:
- numpy.ndarray, upper bounds of neuron' outputs.
- numpy.ndarray, lower bounds of neuron' outputs.
"""
upper_bounds = defaultdict(list)
lower_bounds = defaultdict(list)
batches = math.ceil(train_dataset.shape[0] / self.batch_size)
for i in range(batches):
inputs = train_dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
min_value = np.min(value, axis=0)
max_value = np.max(value, axis=0)
if np.any(upper_bounds[layer]):
max_flag = upper_bounds[layer] > max_value
min_flag = lower_bounds[layer] < min_value
upper_bounds[layer] = upper_bounds[layer] * max_flag + max_value * (1 - max_flag)
lower_bounds[layer] = lower_bounds[layer] * min_flag + min_value * (1 - min_flag)
else:
upper_bounds[layer] = max_value
lower_bounds[layer] = min_value
return upper_bounds, lower_bounds
def _activate_rate(self):
"""
Calculate the activate rate of neurons.
"""
total_neurons = 0
activated_neurons = 0
for _, value in self._activate_table.items():
activated_neurons += np.sum(value)
total_neurons += len(value)
activate_rate = activated_neurons / total_neurons
return activate_rate
class NeuronCoverage(CoverageMetrics):
"""
Calculate the activated neuron coverage. A neuron is activated when its output is greater than the threshold.
Neuron coverage equals the proportion of activated neurons to total neurons in the network.
Args:
model (Model): The pre-trained model which waiting for testing.
threshold (float): Threshold used to determined neurons is activated or not. Default: 0.1.
incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
batch_size (int): The number of samples in a fuzz test batch. Default: 32.
"""
def __init__(self, model, threshold=0.1, incremental=False, batch_size=32):
super(NeuronCoverage, self).__init__(model, incremental, batch_size)
threshold = check_param_type('threshold', threshold, float)
self.threshold = check_value_positive('threshold', threshold)
def get_metrics(self, dataset):
"""
Get the metric of neuron coverage: the proportion of activated neurons to total neurons in the network.
Args:
dataset (numpy.ndarray): Dataset used to calculate coverage metrics.
Returns:
float, the metric of 'neuron coverage'.
Examples:
>>> nc = NeuronCoverage(model, threshold=0.1)
>>> nc_metrics = nc.get_metrics(test_data)
"""
dataset = check_numpy_param('dataset', dataset)
batches = math.ceil(dataset.shape[0] / self.batch_size)
if not self.incremental or not self._activate_table:
self._activate_table = self._init_neuron_activate_table(dataset[0:1])
for i in range(batches):
inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
self._model.predict(Tensor(inputs))
layer_out = _get_summary_tensor_data()
for layer, tensor in layer_out.items():
value = tensor.asnumpy()
value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
activate = np.sum(value > self.threshold, axis=0) > 0
self._activate_table[layer] = np.logical_or(self._activate_table[layer], activate)
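# --- Illustrative aside (toy numbers of our own, not part of MindArmour) ---
# Neuron coverage is simply: activated neurons / total neurons. With boolean
# activate tables like the ones built above, the rate can be computed as:
import numpy as np
from collections import defaultdict
toy_table = defaultdict(list)
toy_table['conv1'] = np.array([True, False, True, True])    # 3 of 4 neurons fired
toy_table['fc1'] = np.array([False, False, True, False])    # 1 of 4 neurons fired
activated = sum(np.sum(v) for v in toy_table.values())
total = sum(len(v) for v in toy_table.values())
print(activated / total)                                     # 0.5 -> 50% coverage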
import os
import numpy as np
import flopy
import warnings
from io import StringIO
from struct import pack
from tempfile import TemporaryFile
from textwrap import dedent
from flopy.utils.util_array import Util2d, Util3d, Transient2d, Transient3d
from ci_framework import base_test_dir, FlopyTestSetup
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
def test_load_txt_free():
a = np.ones((10,), dtype=np.float32) * 250.0
fp = StringIO("10*250.0")
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.arange(10, dtype=np.int32).reshape((2, 5))
fp = StringIO(
dedent(
"""\
0 1,2,3, 4
5 6, 7, 8 9
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.ones((2, 5), dtype=np.float32)
a[1, 0] = 2.2
fp = StringIO(
dedent(
"""\
5*1.0
2.2 2*1.0, +1E-00 1.0
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_txt_fixed():
a = np.arange(10, dtype=np.int32).reshape((2, 5))
fp = StringIO(
dedent(
"""\
01234X
56789
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(5I1)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
fp = StringIO(
dedent(
"""\
0123X
4
5678
9
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(4I1)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.array([[-1, 1, -2, 2, -3], [3, -4, 4, -5, 5]], np.int32)
fp = StringIO(
dedent(
"""\
-1 1-2 2-3
3 -44 -55
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(5I2)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_block():
a = np.ones((2, 5), dtype=np.int32) * 4
fp = StringIO(
dedent(
"""\
1
1 2 1 5 4
"""
)
)
fa = Util2d.load_block(a.shape, fp, a.dtype)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.ones((2, 5), dtype=np.float32) * 4
a[0:2, 1:2] = 9.0
a[0, 2:4] = 6.0
fp = StringIO(
dedent(
"""\
3
1 2 1 5 4.0
1 2 2 2 9.0
1 1 3 4 6.0
"""
)
)
fa = Util2d.load_block(a.shape, fp, a.dtype)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.zeros((2, 5), dtype=np.int32)
a[0, 2:4] = 8
fp = StringIO(
dedent(
"""\
1
1 1 3 4 8
"""
)
)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fa = Util2d.load_block(a.shape, fp, a.dtype)
assert len(w) == 1
assert "blocks do not cover full array" in str(w[-1].message)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_bin():
model_ws = f"{base_dir}_test_load_bin"
test_setup = FlopyTestSetup(test_dirs=model_ws)
def temp_file(data):
# writable file that is destroyed as soon as it is closed
f = TemporaryFile(dir=model_ws)
f.write(data)
f.seek(0)
return f
# INTEGER
a = np.arange(3 * 4, dtype=np.int32).reshape((3, 4)) - 1
fp = temp_file(a.tobytes())
fh, fa = Util2d.load_bin((3, 4), fp, np.int32)
assert fh is None # no header_dtype
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
# check warning if wrong integer type is used to read 4-byte integers
# e.g. on platforms where int -> int64
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fp.seek(0)
fh, fa = Util2d.load_bin((3, 4), fp, np.int64)
fp.close()
assert len(w) == 1
assert a.dtype == np.int32
assert fh is None # no header_dtype
np.testing.assert_equal(fa, a)
# REAL
real_header_fmt = "2i2f16s3i"
header_data = (1, 2, 3.5, 4.5, b"Hello", 6, 7, 8)
real_header = pack(real_header_fmt, *header_data)
assert len(real_header) == 44
a = np.arange(10).reshape((2, 5))
fp = temp_file(real_header + pack("10f", *list(range(10))))
fh, fa = Util2d.load_bin((2, 5), fp, np.float32, "Head")
fp.close()
for h1, h2 in zip(fh[0], header_data):
assert h1 == h2
np.testing.assert_equal(a.astype(np.float32), fa)
assert fa.dtype == np.float32
# DOUBLE PRECISION
dbl_header_fmt = "2i2d16s3i"
dbl_header = pack(dbl_header_fmt, *header_data)
assert len(dbl_header) == 52
fp = temp_file(real_header + pack("10d", *list(range(10))))
fh, fa = Util2d.load_bin((2, 5), fp, np.float64, "Head")
fp.close()
for h1, h2 in zip(fh[0], header_data):
assert h1 == h2
np.testing.assert_equal(a.astype(np.float64), fa)
assert fa.dtype == np.float64
def test_transient2d():
ml = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(ml, nlay=10, nrow=10, ncol=10, nper=3)
t2d = Transient2d(ml, (10, 10), np.float32, 10.0, "fake")
a1 = t2d.array
assert a1.shape == (3, 1, 10, 10), a1.shape
t2d.cnstnt = 2.0
assert np.array_equal(t2d.array, np.zeros((3, 1, 10, 10)) + 20.0)
t2d[0] = 1.0
t2d[2] = 999
assert np.array_equal(t2d[0].array, np.ones((ml.nrow, ml.ncol)))
assert np.array_equal(t2d[2].array, np.ones((ml.nrow, ml.ncol)) * 999)
m4d = t2d.array
t2d2 = Transient2d.from_4d(ml, "rch", {"rech": m4d})
m4d2 = t2d2.array
assert np.array_equal(m4d, m4d2)
def test_transient3d():
nlay = 3
nrow = 4
ncol = 5
nper = 5
ml = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(
ml, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper
)
# Make a transient 3d array of a constant value
t3d = Transient3d(ml, (nlay, nrow, ncol), np.float32, 10.0, "fake")
a1 = t3d.array
assert a1.shape == (nper, nlay, nrow, ncol), a1.shape
# Make a transient 3d array with changing entries and then verify that
# they can be reproduced through indexing
a = np.arange((nlay * nrow * ncol), dtype=np.float32).reshape(
(nlay, nrow, ncol)
)
t3d = {0: a, 2: 1025, 3: a, 4: 1000.0}
t3d = Transient3d(ml, (nlay, nrow, ncol), np.float32, t3d, "fake")
assert np.array_equal(t3d[0].array, a)
assert np.array_equal(t3d[1].array, a)
assert np.array_equal(t3d[2].array, np.zeros((nlay, nrow, ncol)) + 1025.0)
assert np.array_equal(t3d[3].array, a)
assert np.array_equal(t3d[4].array, np.zeros((nlay, nrow, ncol)) + 1000.0)
# Test changing a value
t3d[0] = 1.0
assert np.array_equal(t3d[0].array, np.zeros((nlay, nrow, ncol)) + 1.0)
# Check itmp and file_entry
itmp, file_entry_dense = t3d.get_kper_entry(0)
assert itmp == 1
itmp, file_entry_dense = t3d.get_kper_entry(1)
assert itmp == -1
def test_util2d():
model_ws = f"{base_dir}_test_util2d"
test_setup = FlopyTestSetup(test_dirs=model_ws)
ml = flopy.modflow.Modflow(model_ws=model_ws)
u2d = Util2d(ml, (10, 10), np.float32, 10.0, "test")
a1 = u2d.array
a2 = np.ones((10, 10), dtype=np.float32) * 10.0
assert np.array_equal(a1, a2)
# test external filenames - ascii and binary
fname_ascii = os.path.join(model_ws, "test_a.dat")
fname_bin = os.path.join(model_ws, "test_b.dat")
np.savetxt(fname_ascii, a1, fmt="%15.6E")
u2d.write_bin(a1.shape, fname_bin, a1, bintype="head")
dis = flopy.modflow.ModflowDis(ml, 2, 10, 10)
lpf = flopy.modflow.ModflowLpf(ml, hk=[fname_ascii, fname_bin])
ml.lpf.hk[1].fmtin = "(BINARY)"
assert np.array_equal(lpf.hk[0].array, a1)
assert np.array_equal(lpf.hk[1].array, a1)
# test external filenames - ascii and binary with model_ws and external_path
ml = flopy.modflow.Modflow(
model_ws=model_ws, external_path=os.path.join(model_ws, "ref")
)
u2d = Util2d(ml, (10, 10), np.float32, 10.0, "test")
fname_ascii = os.path.join(model_ws, "test_a.dat")
fname_bin = os.path.join(model_ws, "test_b.dat")
np.savetxt(fname_ascii, a1, fmt="%15.6E")
u2d.write_bin(a1.shape, fname_bin, a1, bintype="head")
dis = flopy.modflow.ModflowDis(ml, 2, 10, 10)
lpf = flopy.modflow.ModflowLpf(ml, hk=[fname_ascii, fname_bin])
ml.lpf.hk[1].fmtin = "(BINARY)"
assert np.array_equal(lpf.hk[0].array, a1)
assert np.array_equal(lpf.hk[1].array, a1)
# bin read write test
fname = os.path.join(model_ws, "test.bin")
u2d.write_bin((10, 10), fname, u2d.array)
a3 = u2d.load_bin((10, 10), fname, u2d.dtype)[1]
assert np.array_equal(a3, a1)
# ascii read write test
fname = os.path.join(model_ws, "text.dat")
u2d.write_txt((10, 10), fname, u2d.array)
a4 = u2d.load_txt((10, 10), fname, u2d.dtype, "(FREE)")
assert np.array_equal(a1, a4)
# fixed format read/write with touching numbers - yuck!
data = np.arange(100).reshape(10, 10)
u2d_arange = Util2d(ml, (10, 10), np.float32, data, "test")
u2d_arange.write_txt(
(10, 10), fname, u2d_arange.array, python_format=[7, "{0:10.4E}"]
)
a4a = u2d.load_txt((10, 10), fname, np.float32, "(7E10.6)")
assert np.array_equal(u2d_arange.array, a4a)
# test view vs copy with .array
a5 = u2d.array
a5 += 1
assert not np.array_equal(a5, u2d.array)
# Util2d.__mul__() overload
new_2d = u2d * 2
assert np.array_equal(new_2d.array, u2d.array * 2)
# test the cnstnt application
u2d.cnstnt = 2.0
a6 = u2d.array
assert not np.array_equal(a1, a6)
u2d.write_txt((10, 10), fname, u2d.array)
a7 = u2d.load_txt((10, 10), fname, u2d.dtype, "(FREE)")
assert np.array_equal(u2d.array, a7)
def stress_util2d(model_ws, ml, nlay, nrow, ncol):
dis = flopy.modflow.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol)
hk = np.ones((nlay, nrow, ncol))
vk = np.ones((nlay, nrow, ncol)) + 1.0
# save hk up one dir from model_ws
fnames = []
for i, h in enumerate(hk):
fname = os.path.join(ml._model_ws, f"test_{i}.ref")
fnames.append(fname)
np.savetxt(fname, h, fmt="%15.6e", delimiter="")
vk[i] = i + 1.0
lpf = flopy.modflow.ModflowLpf(ml, hk=fnames, vka=vk)
# util2d binary check
ml.lpf.vka[0].format.binary = True
# util3d cnstnt propagation test
ml.lpf.vka.cnstnt = 2.0
ml.write_input()
# check that binary is being respect - it can't get no respect!
vka_1 = ml.lpf.vka[0]
a = vka_1.array
vka_1_2 = vka_1 * 2.0
assert np.array_equal(a * 2.0, vka_1_2.array)
if ml.external_path is not None:
files = os.listdir(os.path.join(ml.model_ws, ml.external_path))
else:
files = os.listdir(ml.model_ws)
print("\n\nexternal files: " + ",".join(files) + "\n\n")
ml1 = flopy.modflow.Modflow.load(
ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
)
print("testing load")
assert not ml1.load_fail
# check that both binary and cnstnt are being respected through
# out the write and load process.
assert np.array_equal(ml1.lpf.vka.array, vk * 2.0)
assert np.array_equal(ml1.lpf.vka.array, ml.lpf.vka.array)
assert np.array_equal(ml1.lpf.hk.array, hk)
assert np.array_equal(ml1.lpf.hk.array, ml.lpf.hk.array)
print("change model_ws")
ml.model_ws = os.path.join(model_ws, "new")
ml.write_input()
if ml.external_path is not None:
files = os.listdir(os.path.join(ml.model_ws, ml.external_path))
else:
files = os.listdir(ml.model_ws)
print("\n\nexternal files: " + ",".join(files) + "\n\n")
ml1 = flopy.modflow.Modflow.load(
ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
)
print("testing load")
assert not ml1.load_fail
assert np.array_equal(ml1.lpf.vka.array, vk * 2.0)
assert np.array_equal(ml1.lpf.hk.array, hk)
# more binary testing
ml.lpf.vka[0]._array[0, 0] *= 3.0
ml.write_input()
ml1 = flopy.modflow.Modflow.load(
ml.namefile, model_ws=ml.model_ws, verbose=True, forgive=False
)
assert np.array_equal(ml.lpf.vka.array, ml1.lpf.vka.array)
assert np.array_equal(ml.lpf.hk.array, ml1.lpf.hk.array)
def stress_util2d_for_joe_the_file_king(ml, nlay, nrow, ncol):
dis = flopy.modflow.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol)
hk = np.ones((nlay, nrow, ncol))
vk = np.ones((nlay, nrow, ncol)) + 1.0
# save hk up one dir from model_ws
fnames = []
for i, h in enumerate(hk):
fname = os.path.join(ml._model_ws, f"test_{i}.ref")
fnames.append(fname)
np.savetxt(fname, h, fmt="%15.6e", delimiter="")
vk[i] = i + 1.0
lpf = flopy.modflow.ModflowLpf(ml, hk=fnames, vka=vk)
ml.lpf.vka[0].format.binary = True
ml.lpf.vka.cnstnt = 2.0
ml.write_input()
assert np.array_equal(ml.lpf.hk.array, hk)
assert np.array_equal(ml.lpf.vka.array, vk * 2.0)
from __future__ import division, absolute_import, print_function
import sys
from itertools import product
import numpy as np
from numpy.core import zeros, float64
from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \
assert_raises, assert_array_equal, assert_allclose, assert_equal
from numpy.core.multiarray import inner as inner_
DECPREC = 14
class TestInner(TestCase):
def test_vecself(self):
"""Ticket 844."""
# Inner product of a vector with itself segfaults or give meaningless
# result
a = zeros(shape = (1, 80), dtype = float64)
p = inner_(a, a)
assert_almost_equal(p, 0, decimal = DECPREC)
try:
import numpy.core._dotblas as _dotblas
except ImportError:
_dotblas = None
@dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas")
def test_blasdot_used():
from numpy.core import dot, vdot, inner, alterdot, restoredot
assert_(dot is _dotblas.dot)
assert_(vdot is _dotblas.vdot)
assert_(inner is _dotblas.inner)
assert_(alterdot is _dotblas.alterdot)
assert_(restoredot is _dotblas.restoredot)
def test_dot_2args():
from numpy.core import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args():
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
np.dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = np.dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is np.dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = np.dot(f, v)
assert_(r is np.dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors():
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, np.dot, f, v, r)
assert_raises(ValueError, np.dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, np.dot, f, v, r[:, ::2])
assert_raises(ValueError, np.dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, np.dot, f, v, r)
def test_dot_array_order():
""" Test numpy dot with different order C, F
Comparing results with multiarray dot.
Double and single precisions array are compared using relative
precision of 7 and 5 decimals respectively.
Use 30 decimal when comparing exact operations like:
(a.b)' = b'.a'
"""
_dot = np.core.multiarray.dot
a_dim, b_dim, c_dim = 10, 4, 7
orders = ["C", "F"]
dtypes_prec = {np.float64: 7, np.float32: 5}
np.random.seed(7)
for arr_type, prec in dtypes_prec.items():
for a_order in orders:
a = np.asarray(np.random.randn(a_dim, a_dim),
dtype=arr_type, order=a_order)
assert_array_equal(np.dot(a, a), _dot(a, a))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Program Title: kinematic_model.py
# Program Purpose: Define EEZYbotARM_kinematics parent class and EEZYbotARM_Mk2 child class
# **Version control**
# v3.1 -> adding save of np array for convex hull, adding convex hull function (this works in very basic form)
# v3.2 -> getting the convex hull function to pass data to an overall plotting function (it's also working)
# v3.3 -> troubleshooting why the convex hull and workspace isn't quite right! [19 Nov 19]
# v3.4 -> checking demo functionality
# v3.5 -> moving to github
# v3.6 -> adding MK1 version of the EEZYbotARM as a new sub-class
# v3.7 -> squishing bug for Mk1 inverse kinematics ----> Solved :) !
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# ------------------------------------------#
# Imports #
# ------------------------------------------#
from math import * # for maths
import matplotlib.pyplot as plt # for plotting
from mpl_toolkits.mplot3d import Axes3D # for 3D plotting
# [PUNCH LIST] NEED TO DO SOMETHING ABOUT THIS -> SAME AS ABOVE !
import mpl_toolkits.mplot3d as a3
import numpy as np # for vector arithmetic
import pathlib # for dealing with files
# [PUNCH LIST] needed? -> it's only used to generate a random number, I should be able to delete (pending)
import scipy as sp
# [PUNCH LIST] needed for plotting the convex hull -> I think I can do this in another way!
import pylab as pl
from scipy.spatial import ConvexHull
import pickle # used for saving python files
# ------------------------------------------#
# Helper functions #
# ------------------------------------------#
def plotCoOrd(T0toN, ax, lineColor):
"""
Plot a co-ordinate frame for a transformation matrix (in this case for a joint) using unit vectors.
The plot is made using 3 'quiver objects' (arrows)
--Parameters--
@T0toN -> the co-ordinate frame to plot
@ax -> the matplotlib 3d axes object to make the plot on
@lineColor -> the color the co-ordinate frame
"""
# Invert T0toN because we are rotating from the world frame to the N frame
TNto0 = np.linalg.inv(T0toN)
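# --- Illustrative aside (our own example transform, not from the module) ---
# A joint frame is a 4x4 homogeneous transform: a 3x3 rotation block plus a
# translation column. Inverting it, as plotCoOrd does above, maps world-frame
# quantities back into the joint frame.
theta = radians(30)
T0to1 = np.array([[cos(theta), -sin(theta), 0, 50],
                  [sin(theta),  cos(theta), 0, 20],
                  [0,           0,          1, 0],
                  [0,           0,          0, 1]])
T1to0 = np.linalg.inv(T0to1)
frame1_origin_in_world = T0to1[0:3, 3]    # translation part: [50, 20, 0]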
from utils.pointnet import PointNet
from utils.dualnet import DualNet
from utils.datasets import MonoDataset, DuoDataset
import argparse
import numpy as np
import datetime
import os
import shutil
current_time = datetime.datetime.now()
prefix = current_time.strftime("%m-%d:%H:%M") + "fusion/"
path = "test_results/" + prefix
if not os.path.exists(path):
os.makedirs(path)
else:
shutil.rmtree(path)
os.makedirs(path)
# initialize final variables for return
test_acc = []
test_conf = []
test_energy = []
train_paths = ["train/zero/", "train/one/", "train/two/", "train/three/", "train/four/", "train/five/", "train/thumbup/", "train/ell/", "train/frame/", "train/bird/"]
test_paths = ["test/zero/", "test/one/", "test/two/", "test/three/", "test/four/", "test/five/", "test/thumbup/", "test/ell/", "test/frame/", "test/bird/"]
# initialize running variables for collection
trial_acc = []
trial_conf = np.zeros((len(test_paths),len(test_paths)))
trial_energy = np.zeros(len(test_paths)*201)
# IMPORTANT
num_trials = 25
epochs = 100
bs = 67
num_points = 320
# Single test LL
dataset = MonoDataset(left=True, right=False, num_points=num_points, file_paths=train_paths)
test_dataset = MonoDataset(left=True, right=False, num_points=num_points, file_paths=test_paths)
for j in range(num_trials):
print("LL: ", j)
pnt = PointNet(num_points=320, num_classes=len(test_paths), num_epoch=epochs, batchsize=bs, ptype='small', alpha=0.002, beta=0.01)
res = pnt.train(dataset, test_dataset)
trial_acc.append(res[0])
trial_conf += res[1]
trial_energy += res[2]
test_acc.append(trial_acc)
test_conf.append(trial_conf)
test_energy.append(trial_energy)
# reinitialize running variables for collection
trial_acc = []
trial_conf = np.zeros((len(test_paths),len(test_paths)))
trial_energy = np.zeros(len(test_paths)*201)
np.save(path + "acc", np.array(test_acc))
np.save(path + "conf", np.array(test_conf))
np.save(path + "energy", np.array(test_energy))
# Single test RR
dataset = MonoDataset(left=False, right=True, num_points=num_points, file_paths=train_paths)
test_dataset = MonoDataset(left=False, right=True, num_points=num_points, file_paths=test_paths)
for j in range(num_trials):
print("RR: ", j)
pnt = PointNet(num_points=320, num_classes=len(test_paths), num_epoch=epochs, batchsize=bs, ptype='small', alpha=0.002, beta=0.01)
res = pnt.train(dataset, test_dataset)
trial_acc.append(res[0])
trial_conf += res[1]
trial_energy += res[2]
test_acc.append(trial_acc)
test_conf.append(trial_conf)
test_energy.append(trial_energy)
# reinitialize running variables for collection
trial_acc = []
trial_conf = np.zeros((len(test_paths),len(test_paths)))
trial_energy = np.zeros(len(test_paths)*201)
np.save(path + "acc", np.array(test_acc))
np.save(path + "conf", np.array(test_conf))
np.save(path + "energy", np.array(test_energy))
# Single test F
dataset = MonoDataset(left=True, right=True, num_points=num_points, file_paths=train_paths)
test_dataset = MonoDataset(left=True, right=True, num_points=num_points, file_paths=test_paths)
for j in range(num_trials):
print("F: ", j)
pnt = PointNet(num_points=320, num_classes=len(test_paths), num_epoch=epochs, batchsize=bs, ptype='small', alpha=0.002, beta=0.01)
res = pnt.train(dataset, test_dataset)
trial_acc.append(res[0])
trial_conf += res[1]
trial_energy += res[2]
test_acc.append(trial_acc)
test_conf.append(trial_conf)
test_energy.append(trial_energy)
# reinitialize running variables for collection
trial_acc = []
trial_conf = np.zeros((len(test_paths),len(test_paths)))
trial_energy = np.zeros(len(test_paths)*201)
np.save(path + "acc", np.array(test_acc))
np.save(path + "conf", | np.array(test_conf) | numpy.array |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Functions to craft features.
"""
import warnings
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import binary_opening
from skimage.morphology.selem import disk
import bigfish.stack as stack
from .input_preparation import prepare_extracted_data
# ### Main functions ###
def compute_features(cell_mask, nuc_mask, ndim, rna_coord, smfish=None,
voxel_size_yx=None, foci_coord=None,
centrosome_coord=None,
compute_distance=False, compute_intranuclear=False,
compute_protrusion=False, compute_dispersion=False,
compute_topography=False, compute_foci=False,
compute_area=False, compute_centrosome=False,
return_names=False):
"""Compute requested features.
Parameters
----------
cell_mask : np.ndarray, np.uint, np.int or bool
Surface of the cell with shape (y, x).
nuc_mask: np.ndarray, np.uint, np.int or bool
Surface of the nucleus with shape (y, x).
ndim : int
Number of spatial dimensions to consider (2 or 3).
rna_coord : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx dimensions)
plus the index of the cluster assigned to the spot. If no cluster was
assigned, value is -1. If cluster id is not provided foci related
features are not computed.
smfish : np.ndarray, np.uint
Image of RNAs, with shape (y, x).
voxel_size_yx : int, float or None
Size of a voxel on the yx plan, in nanometer.
foci_coord : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx coordinates), the number of
spots detected in the foci and its index.
centrosome_coord : np.ndarray, np.int64
Coordinates of the detected centrosome with shape (nb_elements, 3) or
(nb_elements, 2). One coordinate per dimension (zyx or yx dimensions).
These coordinates are mandatory to compute centrosome related features.
compute_distance : bool
Compute distance related features.
compute_intranuclear : bool
Compute nucleus related features.
compute_protrusion : bool
Compute protrusion related features.
compute_dispersion : bool
Compute dispersion indices.
compute_topography : bool
Compute topographic features.
compute_foci : bool
Compute foci related features.
compute_area : bool
Compute area related features.
compute_centrosome : bool
Compute centrosome related features.
return_names : bool
Return features names.
Returns
-------
features : np.ndarray, np.float32
Array of features.
"""
# check parameters
stack.check_parameter(voxel_size_yx=(int, float, type(None)),
compute_distance=bool,
compute_intranuclear=bool,
compute_protrusion=bool,
compute_dispersion=bool,
compute_topography=bool,
compute_foci=bool,
compute_area=bool,
compute_centrosome=bool,
return_names=bool)
if smfish is not None:
stack.check_array(smfish, ndim=[2, 3], dtype=[np.uint8, np.uint16])
if smfish.ndim == 3:
smfish = stack.maximum_projection(smfish)
if foci_coord is not None:
stack.check_array(foci_coord, ndim=2, dtype=np.int64)
# prepare input data
(cell_mask,
distance_cell, distance_cell_normalized,
centroid_cell, distance_centroid_cell,
nuc_mask, cell_mask_out_nuc,
distance_nuc, distance_nuc_normalized,
centroid_nuc, distance_centroid_nuc,
rna_coord_out_nuc,
centroid_rna, distance_centroid_rna,
centroid_rna_out_nuc, distance_centroid_rna_out_nuc,
distance_centrosome) = prepare_extracted_data(
cell_mask, nuc_mask, ndim, rna_coord, centrosome_coord)
# initialization
features = ()
names_features_distance = False
names_features_intranuclear = False
names_features_protrusion = False
names_features_dispersion = False
names_features_topography = False
names_features_foci = False
names_features_area = False
names_features_centrosome = False
# distance related features
if compute_distance:
features += features_distance(
rna_coord, distance_cell, distance_nuc, cell_mask, ndim, False)
names_features_distance = True
# nucleus related features
if compute_intranuclear:
features += features_in_out_nucleus(
rna_coord, rna_coord_out_nuc, False)
names_features_intranuclear = True
# protrusion related features
if compute_protrusion:
features += features_protrusion(
rna_coord, cell_mask, nuc_mask, ndim, voxel_size_yx, False)
names_features_protrusion = True
# dispersion indices
if compute_dispersion and smfish is not None:
features += features_dispersion(
smfish, rna_coord, centroid_rna, cell_mask, centroid_cell,
centroid_nuc, ndim, False)
names_features_dispersion = True
elif compute_dispersion and smfish is None:
raise ValueError("Dispersion features can't be computed because "
"'smfish' is not provided.")
# topographic features
if compute_topography and voxel_size_yx is not None:
features += features_topography(
rna_coord, cell_mask, nuc_mask, cell_mask_out_nuc, ndim,
voxel_size_yx, False)
names_features_topography = True
elif compute_topography and voxel_size_yx is None:
raise ValueError("Topographic features can't be computed because "
"'voxel_size_yx' is not provided.")
# foci related features
if compute_foci and foci_coord is not None:
features += features_foci(
rna_coord, foci_coord, ndim, False)
names_features_foci = True
elif compute_foci and foci_coord is None:
raise ValueError("Foci related features can't be computed because "
"'foci_coord' is not provided.")
# area related features
if compute_area:
features += features_area(
cell_mask, nuc_mask, cell_mask_out_nuc, False)
names_features_area = True
# centrosome related features
if (compute_centrosome and centrosome_coord is not None
and voxel_size_yx is not None and smfish is not None):
features += features_centrosome(
smfish, rna_coord, distance_centrosome, cell_mask, ndim,
voxel_size_yx, False)
names_features_centrosome = True
elif compute_centrosome and centrosome_coord is None:
raise ValueError("Centrosome related features can't be computed "
"because 'centrosome_coord' is not provided.")
elif compute_centrosome and voxel_size_yx is None:
raise ValueError("Centrosome related features can't be computed "
"because 'voxel_size_yx' is not provided.")
elif compute_centrosome and smfish is None:
raise ValueError("Centrosome related features can't be computed "
"because 'smfish' is not provided.")
# format features
features = np.array(features, dtype=np.float32)
features = np.round(features, decimals=2)
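# --- Illustrative aside (fabricated toy inputs; shapes follow the docstring
# above, they are not real segmentation results) ---
# A minimal call with only the always-required inputs and two feature groups
# switched on might look like this:
toy_cell = np.zeros((50, 50), dtype=bool)
toy_cell[5:45, 5:45] = True
toy_nuc = np.zeros((50, 50), dtype=bool)
toy_nuc[20:30, 20:30] = True
toy_rna = np.array([[10, 10, -1], [25, 25, 0], [40, 12, -1]], dtype=np.int64)
toy_features = compute_features(
    toy_cell, toy_nuc, ndim=2, rna_coord=toy_rna,
    compute_distance=True, compute_intranuclear=True)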
#!/usr/bin/env python
# coding: utf-8
# <img src="imagenes/rn3.png" width="200">
# <img src="http://www.identidadbuho.uson.mx/assets/letragrama-rgb-150.jpg" width="200">
# # [Neural Networks Course](https://curso-redes-neuronales-unison.github.io/Temario/)
#
# # Multilayer neural networks and the *b-prop* algorithm
#
# [**<NAME>**](http://mat.uson.mx/~juliowaissman/), February 22, 2018.
#
# In this notebook we will practice with the basic algorithm for performing recognition in feedforward neural networks and set up a basic structure to simulate it, for the sake of understanding. For real applications we will use powerful tools such as [Tensorflow](https://www.tensorflow.org), but it is important to build a first simple neural network by hand in order to better understand the basic mechanisms.
#
# As Jack the Ripper said, let's go piece by piece: let's start by assuming that we have the complete specification of the neural network, and that what we want is to generate an initial neural network, or to recover an existing network that was previously saved.
#
# Let's start by importing the modules we will need.
# In[ ]:
import numpy as np
import _pickle as cPickle
# ## 1. Specifying a neural network
#
# First, in order to build a neural network, we have to establish certain information. The important information we must specify when building a neural network is:
#
# - How many layers of neurons the network has, $L$.
# - How many neurons each layer will have, $[n_0, n_1, \ldots, n_L]$, where $n_0$ is the number of inputs and $n_L$ the number of outputs.
# - What the activation function of the hidden neurons is (logistic, rectified linear, ...).
# - What the output type of the network is (linear, logistic, softmax unit, ...).
# - The values used to normalize the input data of the network (for learning in a neural network it is very important that the input values are normalized).
#
# Once these values are set, we need to generate a list of matrices $[W^{(1)}, \ldots, W^{(L)}]$ where $W^{(l)}$ is a weight matrix of dimensions $(n_l, n_{l-1})$. Likewise we need to generate a list of vectors $[b^{(1)}, \ldots, b^{(L)}]$ where $b^{(l)}$ is a vector of $n_l$ elements called biases.
#
# If the entries of $W^{(l)}$ and $b^{(l)}$ are initialized with equal values, this is equivalent to having a single neuron in that layer, so these values must be different. For this example, and in order to simplify the learning operations later on, we will assume that the activation function is always the logistic function.
#
# For better learning, and assuming the activation function is the logistic, it is important that the initial weight values lie in the zone where the logistic function varies the most. If we assume that the inputs to each neuron are normalized (that is, between 0 and 1), then the weights should take values in $(-\sqrt{n_{l-1}}, \sqrt{n_{l-1}})$ so that the weighted sum lands in the region where the logistic function changes the most.
#
# We will generate and store this information in a dictionary (together with the rest of the information we need for a fully defined neural network). At the beginning the normalization values do not matter, since they must be initialized at the start of learning. Note that the neural network would naturally be programmed as a class, but to avoid complexity that we could not carry over to TensorFlow we will keep all the code in structured (procedural) form, just for the record.
#
# **Complete the code to initialize the neural network**
# In[ ]:
def inicializa_red_neuronal(capas, tipo):
"""
Initialize a neural network as a data dictionary.
In this case the activation function is assumed to be the logistic function.
Parameters
----------
capas: A list of integers where the first element is the number of inputs
and the last one the number of outputs, while the intermediate ones are
the number of neurons in each hidden layer.
tipo: A string in {'lineal', 'logistica', 'softmax'} with the type of output function of the network.
Returns
--------
A dictionary `rn` such that
- rn['capas'] = [n0, n1, ..., nL] neurons per layer
- rn['tipo'] = tipo
- rn['W'] = [None, W1, ..., WL] list of weight matrices
- rn['b'] = [None, b1, ..., bL] list of biases
- rn['mu'] = list of means of each attribute (initialized with zeros)
- rn['std'] = list of standard deviations of each attribute (initialized with ones)
"""
rn = {'capas': len(capas), 'tipo': tipo}
rn['mu'] = np.zeros(capas[0])
rn['std'] = np.ones(capas[0])
rn['W'], rn['b'] = inicializa_Wb(capas)
return rn
def inicializa_Wb(capas):
"""
Initialize lists of random weight matrices W and bias vectors b.
Parameters
----------
capas: [n0, n1, ..., nL] number of neurons per layer
Returns
--------
W, b where W = [None, W1, ..., WL] and b = [None, b1, ..., bL]
"""
#------------------------------------------------------------------------
# Add your code here
W = [None] + [np.random.rand(capas[l],capas[l-1]) for l in range(1,len(capas))]
b = [None] + [np.random.rand(capas[l]) for l in range(1, len(capas))]
return W, b
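# --- Illustrative aside (our own variant, not the expected notebook answer) ---
# The text above asks for weights centred on zero, so the logistic units start
# in their most sensitive region. np.random.rand draws from [0, 1), so one way
# to get a symmetric interval is to shift and scale; here we use the common
# 1/sqrt(fan-in) bound (the notebook states the bound slightly differently):
def inicializa_Wb_simetrico(capas):
    W, b = [None], [None]
    for l in range(1, len(capas)):
        limite = 1.0 / np.sqrt(capas[l - 1])
        W.append(np.random.uniform(-limite, limite, (capas[l], capas[l - 1])))
        b.append(np.random.uniform(-limite, limite, capas[l]))
    return W, b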
# pylint: disable=too-few-public-methods, method-hidden
""" Contains Sampler-classes. """
import warnings
from copy import copy
import numpy as np
try:
import scipy.stats as ss
except ImportError:
pass
from .utils_random import make_rng
# aliases for Numpy, Scipy-Stats, TensorFlow-samplers
ALIASES = {
'n': {'np': 'normal', 'ss': 'norm', 'tf': 'Normal'},
'u': {'np': 'uniform', 'ss': 'uniform', 'tf': 'Uniform'},
'mvn': {'np': 'multivariate_normal', 'ss': 'multivariate_normal'},
'e': {'np': 'exponential', 'ss': 'expon', 'tf': 'Exponential'},
'g': {'np': 'gamma', 'ss': 'gamma', 'tf': 'Gamma'},
'be' : {'np': 'beta', 'ss': 'beta', 'tf': 'Beta'},
'mnm': {'np': 'multinomial', 'ss': 'multinomial', 'tf': 'Multinomial'},
'f': {'np': 'f', 'ss': 'f'},
'p': {'np': 'poisson', 'ss': 'poisson'},
'w': {'np': 'weibull', 'ss': 'dweibull'},
'ln': {'np': 'lognormal', 'ss': 'lognorm'},
'b' : {'np': 'binomial', 'ss': 'binom'},
'chi2': {'np': 'chisquare', 'ss': 'chi2'},
'c': {'np': 'choice'}
}
def _get_method_by_alias(alias, module, tf_distributions=None):
""" Fetch fullname of a randomizer from ``scipy.stats``, ``tensorflow`` or
``numpy`` by its alias or fullname.
"""
rnd_submodules = {'np': np.random,
'tf': tf_distributions,
'ss': ss}
# fetch fullname
fullname = ALIASES.get(alias, {module: alias for module in ['np', 'tf', 'ss']}).get(module, None)
if fullname is None:
raise ValueError("Distribution %s has no implementaion in module %s" % (alias, module))
# check that the randomizer is implemented in corresponding module
if not hasattr(rnd_submodules[module], fullname):
raise ValueError("Distribution %s has no implementaion in module %s" % (fullname, module))
return fullname
def arithmetize(cls):
""" Add arithmetic operations to Sampler-class.
"""
for oper in ['__add__', '__mul__', '__truediv__', '__sub__', '__pow__', '__floordiv__', '__mod__',
'__radd__', '__rmul__', '__rtruediv__', '__rsub__', '__rpow__', '__rfloordiv__', '__rmod__']:
def transform(self, other, fake=oper):
""" Arithmetic operation on couple of Samplers.
Implemented via corresponding operation in ndarrays.
Parameters
----------
other : Sampler
second Sampler, the operation is applied to.
Returns
-------
Sampler
resulting sampler.
"""
_class = classes[fake]
return _class(self, other)
setattr(cls, oper, transform)
return cls
@arithmetize
class Sampler():
""" Base class Sampler that implements algebra of Samplers.
Attributes
----------
weight : float
weight of Sampler self in mixtures.
"""
def __init__(self, *args, **kwargs):
self.__array_priority__ = 100
self.weight = 1.0
# if dim is supplied, redefine sampling method
if 'dim' in kwargs:
# assemble stacked sampler
dim = kwargs.pop('dim')
stacked = type(self)(*args, **kwargs)
for _ in range(dim - 1):
stacked = type(self)(*args, **kwargs) & stacked
# redefine sample of self
self.sample = stacked.sample
def sample(self, size):
""" Sampling method of a sampler.
Parameters
----------
size : int
lentgh of sample to be generated.
Returns
-------
np.ndarray
Array of size (len, Sampler's dimension).
"""
raise NotImplementedError('The method should be implemented in child-classes!')
def __or__(self, other):
""" Implementation of '|' operation for two instances of Sampler-class.
The result is the mixture of two samplers. Weights are taken from
samplers' weight-attributes.
Parameters
----------
other : Sampler
the sampler to be added to self.
Returns
-------
Sampler
resulting mixture of two samplers.
"""
return OrSampler(self, other)
def __and__(self, other):
""" Implementation of '&' operation for instance of Sampler-class.
Two cases are possible: if ``other`` is numeric, then "&"-operation changes
the weight of a sampler. Otherwise, if ``other`` is also a Sampler, the resulting
Sampler is a multidimensional sampler, with starting coordinates being sampled from
``self``, and trailing - from ``other``.
Parameters
----------
other : int or float or Sampler
the sampler/weight for multiplication.
Returns
-------
Sampler
result of the multiplication.
"""
if isinstance(other, (float, int)):
self.weight *= other
return self
return AndSampler(self, other)
def __rand__(self, other):
""" Implementation of '&' operation on a weight for instance of Sampler-class.
see docstring of Sampler.__and__.
"""
return self & other
def apply(self, transform):
""" Apply a transformation to the sampler.
Build new sampler, which sampling function is given by `transform(self.sample(size))``.
Parameters
----------
transform : callable
function, that takes ndarray of shape (size, dim_sampler) and produces
ndarray of shape (size, new_dim_sampler).
Returns
-------
Sampler
instance of class Sampler with redefined method `sample`.
"""
return ApplySampler(self, transform)
def truncate(self, high=None, low=None, expr=None, prob=0.5, max_iters=None, sample_anyways=False):
""" Truncate a sampler. Resulting sampler produces points satisfying ``low <= pts <= high``.
If ``expr`` is suplied, the condition is ``low <= expr(pts) <= high``.
Uses while-loop to obtain a sample from the region of interest of needed size. The behaviour
of the while loop is controlled by parameters ``max_iters`` and ``sample_anyways``-parameters.
Parameters
----------
high : ndarray, list, float
upper truncation-bound.
low : ndarray, list, float
lower truncation-bound.
expr : callable, optional.
Some vectorized function. Accepts points of sampler, returns either bool or float.
In case of float, either high or low should also be supplied.
prob : float, optional
estimate of P(truncation-condtion is satisfied). When supplied,
can improve the performance of sampling-method of truncated sampler.
max_iters : float, optional
if the number of iterations needed for obtaining the sample exceeds this number,
either a warning or error is raised. By default is set to 1e7 (constant of TruncateSampler-class).
sample_anyways : bool, optional
If set to True, when exceeding `self.max_iters` number of iterations the procedure throws a warning
but continues. If set to False, the error is raised.
Returns
-------
Sampler
new Sampler-instance, truncated version of self.
"""
return TruncateSampler(self, high, low, expr, prob, max_iters, sample_anyways)
class OrSampler(Sampler):
""" Class for implementing `|` (mixture) operation on `Sampler`-instances.
"""
def __init__(self, left, right, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bases = [left, right]
# calculate probs of samplers in mixture
weights = np.array([self.bases[0].weight, self.bases[1].weight])
self.weight = np.sum(weights)
self.normed = weights / np.sum(weights)
def sample(self, size):
""" Sampling procedure of a mixture of two samplers. Samples points with probabilities
defined by weights (`self.weight`-attr) from two samplers invoked (`self.bases`-attr) and
mixes them in one sample of needed size.
"""
up_size = np.random.binomial(size, self.normed[0])
low_size = size - up_size
up_sample = self.bases[0].sample(size=up_size)
low_sample = self.bases[1].sample(size=low_size)
sample_points = np.concatenate([up_sample, low_sample])
sample_points = sample_points[np.random.permutation(size)]
return sample_points
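# --- Illustrative aside (toy subclass of our own; the package's concrete
# samplers live elsewhere) ---
# A minimal concrete Sampler shows how '&' (weighting) and '|' (mixture)
# compose, and how OrSampler.sample above splits the batch:
class ConstantSampler(Sampler):
    """Toy sampler that always returns the same value."""
    def __init__(self, value, **kwargs):
        super().__init__(**kwargs)
        self.value = value
    def sample(self, size):
        return np.full((size, 1), self.value, dtype=float)

mixture = (0.25 & ConstantSampler(-1.0)) | (0.75 & ConstantSampler(1.0))
points = mixture.sample(10000)
print((points > 0).mean())    # roughly 0.75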
# Python modules
# 3rd party modules
import numpy as np
from lmfit import Parameters
# Our modules
import vespa.analysis.chain_fit_identity as chain_fit_identity
import vespa.common.util.math_ as util_math
import vespa.common.util.generic_spectral as util_spectral
import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt
from vespa.common.constants import DEGREES_TO_RADIANS as DTOR
from vespa.analysis.constants import FitLineshapeModel, VoigtDefaultFixedT2, FitMacromoleculeMethod
from vespa.analysis.constants import FitOptimizeMethod as optmeth
from vespa.analysis.chain_base import Chain
LMFIT_METHODS = [optmeth.LMFIT_DEFAULT, optmeth.LMFIT_JACOBIAN]
class ChainFitVoigt(Chain):
"""
Building block object used to create a processing chain for MRS data.
Performs LCM (linear combination model) fit to the data. Fit model is made
up of spectrally simulated basis spectra for all metabolites.
"""
def __init__(self, dataset, block):
"""
Chain objects organize Algo (algorithm) calls by setting up access to
input data and parameters, and creating standard output values for View.
Base class sets convenience references to: self._block and self._dataset
self.data is always initialized as []
"""
super().__init__(dataset, block)
self.fit_function = self.lorgauss_internal
self.reset_results_arrays()
# book-keeping attributes
self.lmfit_fvar_names = []
@property
def nmet(self):
""" Number of metabolites to be fitted - varies depending on model """
if self._block is not None:
if self._block.set.prior_list is not None:
return len(self._block.set.prior_list)
return 0
def reset_results_arrays(self):
"""
Results array reset is in its own method because it may need to be
called at other times than just in the object initialization.
"""
nmet = self.nmet
nmmol = self._block.nmmol
nparam = self._block.nparam
spectral_dim0 = self._dataset.spectral_dims[0]
if len(self.data) != spectral_dim0:
self.data = np.zeros(spectral_dim0, complex)
self.yini = np.zeros((nmet+nmmol, spectral_dim0), complex)
self.yfit = np.zeros((nmet+nmmol, spectral_dim0), complex)
self.base = np.zeros(spectral_dim0, complex)
self.initial_values = np.zeros(nparam, float)
self.fit_results = np.zeros(nparam, float)
self.fit_baseline = np.zeros(spectral_dim0, complex)
self.weight_array = np.zeros(spectral_dim0, complex)
self.limits = np.zeros((2,nparam), float)
self.fitted_lw = 0.0
def run_global_init(self):
""""
Moved all of the global (one time) initialization code to this method
so we could package it in run() in an 'if' statement. This is in line
with making the 'fit all voxels' functionality as streamlined as
possible.
"""
block = self._block
set = self._block.set
prior = self._block.set.prior
self.spectral_dims = self._dataset.spectral_dims
self.nmmol = self._block.nmmol
self.nparam = self._block.nparam
self.init_b0 = 0.0
self.init_lw_hz = 3.0
self.init_ta = 0.8
self.init_tb = 0.03
self.init_ampl = None
self.init_area = None
self.limits = np.zeros((2,self.nparam+self.nmmol), float)
self.weight_array = np.zeros(self._dataset.spectral_dims[0], complex)
self.fit_baseline = 0.0 # needed for LORGAUSS call
self.fit_function = self.lorgauss_internal
self.fix_t2_center = VoigtDefaultFixedT2.CENTER
self.minmaxlw = [0,0]
# set up basis set for selected metabolites, collect all ppm locations
basis_mets = []
ppms = []
for name in set.prior_list:
basis_mets.append(prior.basis_set[name].fid.copy())
ppms += prior.basis_set[name].all_ppms
self.basis_mets = np.array(basis_mets)
self.peakpts = self._dataset.ppm2pts(np.array(ppms)) # for weight array calc
# set up basis set for macromolecules if needed
#self.macromol_model = set.macromol_model
self.basis_mmol = None
if set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
if set.macromol_single_basis_dataset:
tmp = set.macromol_single_basis_dataset.blocks['raw']
self.basis_mmol = tmp.data.copy()
# check results arrays for proper dimensionality
block.check_parameter_dimensions(self)
def run(self, voxels, entry='initial_only', statusbar=None, do_init=True):
"""
Run is typically called every time a processing setting is changed
in the parent (block) object. Run processes a single voxel at a time.
This object maintains previous run() results values until next run().
This allows the View to update without having to re-run the pipeline.
The 'entry' keyword adds flexibility to Block-Chain-View relationship.
"""
block = self._block
set = self._block.set
prior = self._block.set.prior
dataset = self._dataset
#----------------------------------------------------------------------
# Return with zero values if no metabolites are selected
if self.nmet < 1:
self.yini = self.yini * 0
voxel = voxels[0]
self.data = dataset.get_source_data('fit')
self.data = self.data[voxel[2],voxel[1],voxel[0],:]
plot_results = { 'fitted_lw' : 3.0,
'minmaxlw' : [1,5],
'init_b0' : 0.0,
'init_ph0' : -dataset.get_phase_0(voxel) * np.pi/180.0,
'init_ph1' : -dataset.get_phase_1(voxel),
'data' : self.data.copy(),
'weight_array' : self.data.copy() * 0,
'fit_baseline' : self.data.copy() * 0,
'yfit' : self.data.copy() * 0,
'yini' : self.data.copy() * 0,
'init_baseline': self.data.copy() * 0,
'mmol_area' : 1.0 }
return plot_results
#----------------------------------------------------------------------
# Do the one time global bits of code, if needed
if do_init:
self.run_global_init()
#----------------------------------------------------------------------
# Now process the current voxel
data_source = dataset.get_source_data('fit')
voxel = voxels[0] # because we got rid of for-loop
x,y,z = voxel # for convenience
self.iteration = 0 # global index used in functors as a trigger
self.voxel = voxel
self.statusbar = statusbar
# local copy of input data
self.data = data_source[z,y,x,:].copy()
# spectral chain needs update for this line to be valid
self.chain = dataset.get_source_chain('fit')
self.kodata = self.chain.kodata.copy()
# various default values
self.mmol_area = 1.0
# copy 'global' parameters, that DO change with voxel, from dataset
#
# NB. phase0/1 are inputs for 'manual' method, the init_ph0/1 are
# outputs from initval calcs. If 'manual' is selected, then the
# output value should be equal but negative to original. We use
# the init_ph0/1 to update the GUI (and mrs_dataset values) so
# the chain needs both input and output (I think).
self.phase0 = dataset.get_phase_0(voxel)
self.phase1 = dataset.get_phase_1(voxel)
self.init_ph0 = -dataset.get_phase_0(voxel) * np.pi / 180.0 # match units in util_initial_values
self.init_ph1 = -dataset.get_phase_1(voxel)
# copy block parameters, that DO change with voxel, from block
self.frequency_shift = dataset.get_frequency_shift(voxel)
self.fit_baseline = block.fit_baseline[:,x,y,z].copy()
self.init_baseline = self.fit_baseline.copy() * 0
# setup chain results arrays
self.initial_values = voigt_checkout(self.nmet, block.initial_values[:,x,y,z], dataset)
self.fit_results = voigt_checkout(self.nmet, block.fit_results[ :,x,y,z], dataset)
self.fit_stats = block.fit_stats[ :,x,y,z].copy()
self.cramer_rao = block.cramer_rao[:,x,y,z].copy()
self.confidence = block.confidence[:,x,y,z].copy()
# select the chain processing functor based on the entry point
if entry == 'initial_only':
funct_fit_voigt.do_processing_initial(self)
elif entry == 'full_fit' or entry == 'all':
funct_fit_voigt.do_processing_full_fit(self)
elif entry == 'plot_refresh':
funct_fit_voigt.do_processing_plot_refresh(self)
elif entry == 'output_refresh':
funct_fit_voigt.do_processing_output_refresh(self)
elif entry == 'voxel_change':
if np.sum(self.initial_values[0:self.nmet])==0.0:
flag_auto_initvals = True
else:
flag_auto_initvals = False
funct_fit_voigt.do_processing_voxel_change(self, flag_auto_initvals=flag_auto_initvals)
else:
print('oooops! - chain_fit_voigt "entry" point error ')
if statusbar:
statusbar.SetStatusText(' Fitting Done', 0)
# one last lw calc to refresh HTML window on opening VIFF file
self.fitted_lw, _ = util_spectral.voigt_width(self.fit_results[self.nmet*2], self.fit_results[self.nmet*2+1], dataset)
block.initial_values[:,x,y,z] = voigt_checkin(self.nmet, self.initial_values, dataset)
block.fit_results[ :,x,y,z] = voigt_checkin(self.nmet, self.fit_results, dataset)
block.fit_stats[ :,x,y,z] = self.fit_stats.copy()
block.fit_baseline[ :,x,y,z] = self.fit_baseline.copy()
block.cramer_rao[ :,x,y,z] = self.cramer_rao.copy()
block.confidence[ :,x,y,z] = self.confidence.copy()
# Initial value algorithms change b0, ph0/ph1. To be well behaved we ask
# the dataset object to save these to the 'spectral' block for us.
#
# NB. In CLI mode, call this chain with 'initial_only' first, then update
# the 'spectral' block and only then call this chain with 'full_fit'
dataset.set_frequency_shift(dataset.get_frequency_shift(voxel) + self.init_b0, voxel)
dataset.set_phase_0(-self.init_ph0 * 180.0 / np.pi, voxel)
dataset.set_phase_1(-self.init_ph1, voxel)
# Return values specific to calling Tab used to update its self.view (plot_panel_spectrum object).
plot_results = { 'fitted_lw' : self.fitted_lw,
'minmaxlw' : self.minmaxlw,
'init_b0' : self.init_b0,
'init_ph0' : self.init_ph0 * 180.0 / np.pi,
'init_ph1' : self.init_ph1,
'data' : self.data.copy(),
'weight_array' : self.weight_array.copy(),
'fit_baseline' : self.fit_baseline.copy(),
'yfit' : self.yfit.copy(),
'yini' : self.yini.copy(),
'init_baseline' : self.init_baseline.copy(),
'mmol_area' : self.mmol_area }
return plot_results
def create_param_labels(self):
""" Create list of unique parameter labels """
plabel = []
unique_abbr = [item.replace('-', '_') for item in self._dataset.prior_list_unique]
for item in unique_abbr: plabel.append('area_' + item)
for item in unique_abbr: plabel.append('freq_' + item)
plabel.append('ta')
plabel.append('tb')
plabel.append('ph0')
plabel.append('ph1')
if self._block.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
plabel.append('mmol_area')
plabel.append('mmol_freq')
return plabel
def lorgauss_internal_lmfit_dfunc(self, params, *args, **kwargs):
"""
This is in the format that LMFIT expects to call in the Minimizer class
for the 'least_squares' algorithm.
This returns the weighted partial derivative functions all_pders * ww
as a single numpy (n,m) float array, where where n = # of variable
parameters (versus dependent params) and m = # of spectral points. In
this case, the real and imaginary vectors have been concatenated into
a single array, so m = 2 * npts_spectral_zerofilled.
Note. The vespa model (for one example) might have 48 parameters, but
only 42 are variable parameters while the other 6 are dependent
expressions (e.g. freq_naag = freq_naa + 0.04). The LMFIT algorithm
only passes in the 42 'free' params, and I need to expand that into the
actual 48 for the self.lorgauss_internal() call to work properly. On
return, I need to remove the pder entries for the dependent parameters
(and return just a 42 x npts array).
params - these are just the free variable values; we need to expand this
into a full list/dict of free and evaluated expression variables
for the call to self.lorgauss_internal(). This can be a list of
current variable values, OR it can be an ordered dict of LMFIT
Parameters.
"""
ww = np.concatenate([self.weight_array, self.weight_array])
# expand list of free variable values into full list of free and evaluated expression values
all_params = self.all_params.copy() # copy of full param set
for name, val in zip(self.lmfit_fvar_names, params):
all_params[name].value = val # update free params to current pass values
all_params.update_constraints() # evaluate expression params values
yfit, all_pders = self.lorgauss_internal(all_params, pderflg=True)
# Re-sort all_pders array if inequality expressions present in Parameters list
#
# - pder returns in 'Vespa' order (area(s), freq(s), ta, tb, ph0, ph1, mmol_area, mmol_freq)
# - if inequality control vars have been added to end of Parameters list (typical in Vespa
# model) then we have to re-sort
# - usually things like 'freq_naag' have to be relocated to position where 'delta_freq_naa'
# was located in the 'params' variable that was input to this method
pders = []
indxs = []
all_names = list(all_params.keys())
for key in self.lmfit_fvar_names:
if 'delta_' in key:
indx = all_names.index(key.replace('delta_', ''))
pders.append(-1 * all_pders[indx, :]) # -1 is empirical vs LMFIT, bjs 3/2021
else:
indx = all_names.index(key)
pders.append(all_pders[indx, :])
indxs.append(indx)
pders = np.array(pders)
# expand complex to 1D and apply weighting scheme
dfunc = []
for pder in pders:
dfunc.append(np.concatenate([pder.real, pder.imag]) * ww * (-1)) # -1 is empirically vs LMFIT, bjs 3/2021
dfunc = np.array(dfunc)
return dfunc.T # empirical vs LMFIT requirement
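# Illustrative sketch of the free-vs-dependent parameter bookkeeping described in the docstring
# above. The parameter names are made up for illustration and are not the actual Vespa prior:
# >>> from lmfit import Parameters
# >>> pars = Parameters()
# >>> pars.add('freq_naa', value=2.01, vary=True)
# >>> pars.add('freq_naag', expr='freq_naa + 0.04')  # dependent expression, not a free variable
# >>> free_names = [name for name, p in pars.items() if p.vary]  # only these are passed in by LMFIT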
def lorgauss_internal_lmfit(self, a, report_stats=False):
"""
This is in the format that LMFIT expects to call in the Minimizer class.
This returns the weighted difference (data - yfit) * ww as a single
numpy float array, where the real and imaginary vectors have been
concatenated into a single array.
a - fully expanded list of parameters, free and evaluated expressions
"""
data = self.data_scale.copy()
ww = self.weight_array
yfit, _ = self.lorgauss_internal(a, pderflg=False)
yfit = | np.concatenate([yfit.real, yfit.imag]) | numpy.concatenate |
import numpy as np
from scipy.optimize import minimize_scalar, minimize
from scipy.stats import norm, multivariate_normal
def sample_data():
dt = 1/12
maturity = np.array([1,3,5,10,20])
data = np.array([
[0.01995,0.02039,0.02158,0.02415,0.02603],
[0.01981,0.02024,0.02116,0.02346,0.02518],
[0.01838,0.01865,0.01969,0.02276,0.02466],
[0.01703,0.01739,0.01857,0.02177,0.02373],
[0.01746,0.01875,0.0211,0.0249,0.0271],
[0.0163,0.01773,0.0204,0.02468,0.02679],
[0.01597,0.01777,0.02048,0.0245,0.02658],
[0.01582,0.01735,0.01946,0.02308,0.02498],
[0.01553,0.01651,0.01846,0.02216,0.02388],
[0.01546,0.01627,0.01784,0.02088,0.02222],
[0.01631,0.01752,0.01945,0.02254,0.02366],
[0.01635,0.01719,0.01902,0.02181,0.02278],
[0.01587,0.01628,0.01772,0.02025,0.02121],
[0.01469,0.01474,0.01586,0.01826,0.01919],
[0.01507,0.01498,0.01611,0.01854,0.01918],
[0.01493,0.01468,0.01569,0.0181,0.01892],
[0.0148,0.01455,0.01551,0.01787,0.01886],
[0.01361,0.01334,0.01406,0.01617,0.01712],
[0.0126,0.01218,0.01246,0.01401,0.01482],
[0.01265,0.01238,0.01264,0.01417,0.01489],
[0.01322,0.01312,0.01353,0.01512,0.01545],
[0.01369,0.01361,0.01412,0.01596,0.01641],
[0.01511,0.01609,0.01739,0.01965,0.0204],
[0.01576,0.01692,0.01873,0.02159,0.02186],
[0.01496,0.01643,0.01821,0.02111,0.02168],
[0.01465,0.01665,0.01861,0.02163,0.02214],
[0.01485,0.01709,0.01909,0.02221,0.02302],
[0.01467,0.01678,0.01859,0.02182,0.02302],
[0.01464,0.0169,0.01906,0.02257,0.02388],
[0.01461,0.01673,0.01864,0.02165,0.02266],
[0.01464,0.0174,0.01942,0.02252,0.02317],
[0.01471,0.0178,0.01987,0.02287,0.02364],
[0.01481,0.01785,0.01989,0.02286,0.02318],
[0.01654,0.02026,0.02247,0.02455,0.02411],
[0.01787,0.0215,0.02355,0.0254,0.02518],
[0.01833,0.021,0.02298,0.02472,0.02439],
[0.01839,0.0219,0.02444,0.02626,0.02571],
[0.01851,0.02277,0.02537,0.0277,0.02738],
[0.01875,0.02271,0.02501,0.02708,0.02704],
[0.01873,0.02192,0.02436,0.02655,0.02671],
[0.01887,0.0225,0.02532,0.02757,0.02754],
[0.01851,0.02175,0.02442,0.02656,0.02649],
[0.01842,0.02097,0.02338,0.02549,0.02547],
[0.0183,0.02019,0.02242,0.02458,0.02429],
[0.0179,0.01953,0.02128,0.02315,0.02265],
[0.01853,0.0201,0.02164,0.02338,0.02284],
[0.01844,0.0194,0.02048,0.02209,0.02159],
[0.01776,0.01825,0.01893,0.01992,0.01969],
[0.01733,0.01807,0.01879,0.01991,0.02023],
[0.0176,0.01802,0.01869,0.01988,0.0205],
[0.01769,0.01789,0.01838,0.01953,0.01995],
[0.01751,0.01736,0.01771,0.01889,0.01913],
[0.01711,0.01679,0.01716,0.01828,0.01865],
[0.0156,0.01496,0.0153,0.01618,0.0166],
[0.01478,0.01382,0.01421,0.01506,0.0152],
[0.01186,0.01164,0.01201,0.01254,0.01251],
[0.01228,0.01287,0.01353,0.0142,0.01386],
[0.01298,0.01357,0.01442,0.01577,0.01568],
[0.01386,0.01492,0.01593,0.0175,0.01709],
[0.0135,0.0139,0.01481,0.01653,0.01628]
])
return dt, maturity, data
class DynamicNelsonSiegel:
"""
Example
-------
>>> dt, maturity, data = sample_data()
>>> dns = DynamicNelsonSiegel(dt, maturity)
>>> dns.train(data, disp=True)
>>> time, num = 1, 200
>>> scenarios = dns.sample(time, num)
>>> mean_reversion, level1, level2, twist1, twist2 = dns.shock(time)
"""
def __init__(self, dt, maturity):
self.maturity = maturity
self.dt = dt
self.params = None
self.x0 = None
self.A = None
self.B = None
self.Q = None
self.H = None
self.R = None
# def set_params(self, params):
# lambda_, eps, kappa11, kappa22, kappa33, theta1, theta2, theta3, sigma11, sigma21, sigma22, sigma31, sigma32, sigma33, L0, S0, C0 = params
# self.x0 = np.array([L0, S0, C0])
# self.params = params[:-3]
# self.A, self.B, self.Q, self.H, self.R = self._system(self.params)
def train(self, X, lr=5e-7, tol=1.5e1, disp=False):
if type(self.params) == type(None):
self.params = self._initial_value(X)
while(True):
params_grad = self._gradient(self.params, X)
self.params += lr*params_grad
self.A, self.B, self.Q, self.H, self.R = self._system(self.params)
self.x0 = self._filtering(self.params, X)[0]
norm = np.sqrt(sum(params_grad**2))
if disp:
loglik = self._filtering(self.params, X)[2]
print('Norm of Gradient: {:.6f}, Loglikelihood: {:.6f}'.format(norm, loglik))
if norm < tol:
break
def _system(self, params):
lambda_, eps, kappa11, kappa22, kappa33, theta1, theta2, theta3, sigma11, sigma21, sigma22, sigma31, sigma32, sigma33 = params
A = np.array([[1-kappa11*self.dt, 0, 0],
[0, 1-kappa22*self.dt, 0],
[0, 0, 1-kappa33*self.dt]])
B = np.array([kappa11*theta1, kappa22*theta2, kappa33*theta3])*self.dt
L = np.array([[sigma11, 0, 0],
[sigma21, sigma22, 0],
[sigma31, sigma32, sigma33]])
Q = self.dt*L@L.T
H = np.c_[np.ones_like(self.maturity), (1-np.exp(-lambda_*self.maturity))/(lambda_*self.maturity), (1-np.exp(-lambda_*self.maturity))/(lambda_*self.maturity)-np.exp(-lambda_*self.maturity)]
R = np.identity(len(self.maturity))*eps**2
return A, B, Q, H, R
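# Sketch of the state-space model these matrices define (Kalman-filter convention;
# x_t = (level, slope, curvature) factors, y_t = observed yields at the given maturities):
# x_{t+1} = A @ x_t + B + w_t,  w_t ~ N(0, Q)
# y_t     = H @ x_t + v_t,      v_t ~ N(0, R)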
def _initial_value(self, X):
def obj_fun(lambda_):
design_matrix = np.c_[np.ones_like(self.maturity), (1-np.exp(-lambda_*self.maturity))/(lambda_*self.maturity), (1-np.exp(-lambda_*self.maturity))/(lambda_*self.maturity)-np.exp(-lambda_*self.maturity)]
beta = np.linalg.inv(design_matrix.T@design_matrix)@design_matrix.T@X.T
rmse = np.sqrt(np.mean((X.T-design_matrix@beta)**2))
return rmse
res = minimize_scalar(obj_fun, method='bounded', bounds=(1e-2,1), options={'disp':False})
lambda_ = res.x
eps = obj_fun(lambda_)
design_matrix = np.c_[np.ones_like(self.maturity), (1-np.exp(-lambda_*self.maturity))/(lambda_*self.maturity), (1-np.exp(-lambda_*self.maturity))/(lambda_*self.maturity)-np.exp(-lambda_*self.maturity)]
beta = ( | np.linalg.inv(design_matrix.T@design_matrix) | numpy.linalg.inv |
# encoding: utf-8
from __future__ import division
import sys
import os
import time
import datetime
import pandas as pd
import numpy as np
import math
import ast
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
ADD_PATH = "%s/../"%(CURRENT_DIR)
sys.path.append(ADD_PATH)
from tools.mail import MyEmail
from tools.html import html_with_style
DATA_PATH = "%s/../data/basic_matrix" % (CURRENT_DIR)
def reviewing_data(df,send_str):
if len(df) ==0:
send_str += 'No orders' + '<br>'
return send_str
send_str += 'Orders under review: '+str(len(df[(df['type']=='nan')])) + ' ' + '<br>'
send_str += 'Share under review (%): '+str(100.0*len(df[(df['type']=='nan')])/len(df)) + ' ' + '<br>' + '<br>'
return send_str
def pass_rate(df,send_str):
if len(df) == 0:
send_str += 'No completed orders' + '<br>'
return send_str
out = []
concat = []
out.append((len(df),len(df[df['suggestion']=='1']),100.0*len(df[df['suggestion']=='1'])/len(df)))
if len(df[df['type']=='first']) == 0:
out.append((0,0,0))
else:
out.append((len(df[df['type']=='first']),len(df[(df['suggestion']=='1')&(df['type']=='first')]),100.0*len(df[(df['suggestion']=='1')&(df['type']=='first')])/len(df[df['type']=='first'])))
if len(df[df['type']=='regular']) == 0:
out.append((0,0,0))
else:
out.append((len(df[df['type']=='regular']),len(df[(df['suggestion']=='1')&(df['type']=='regular')]),100.0*len(df[(df['suggestion']=='1')&(df['type']=='regular')])/len(df[df['type']=='regular'])))
if len(df[df['type']=='again']) == 0:
out.append((0,0,0))
else:
out.append((len(df[df['type']=='again']),len(df[(df['suggestion']=='1')&(df['type']=='again')]),100.0*len(df[(df['suggestion']=='1')&(df['type']=='again')])/len(df[df['type']=='again'])))
frame = pd.DataFrame(out, index=['Overall pass rate','New-order pass rate','Repeat-loan pass rate','Re-application pass rate'], columns=['Applications','Passed','Pass rate'])
concat.append(frame)
concat = pd.concat(concat, keys=['Auto-review pass rate'], axis=1)
send_str += html_with_style(concat) + '<br>'
return send_str
def baseline(df,send_str):
if len(df) == 0:
send_str += '无新单' + '<br>'
return send_str
out = []
concat = []
baseline_data = []
baseline = df['baseline'].values.tolist()
for i in baseline:
temp = ast.literal_eval(i)
if len(temp) != 0:
baseline_data.extend(temp)
if baseline_data == []:
send_str += 'No baseline' + '<br>'
return send_str
for i in set(baseline_data):
out.append((i,baseline_data.count(i),100.0*baseline_data.count(i)/len(df)))
frame = pd.DataFrame(out, columns=['baseline','Count','Rejection rate'])
concat.append(frame)
concat = pd.concat(concat, keys=['New-order baseline rejection rate'], axis=1)
send_str += html_with_style(concat) + '<br>'
return send_str
def score(df,send_str):
if len(df) == 0:
send_str += 'No new orders' + '<br>'
return send_str
out = []
concat = []
temp = df['score'].values.tolist()
score = []
for i in temp:
try:
score.append(int(i))
except:
continue
score = np.array(score)
out.append(('>1000',sum(np.array(score)>1000),100.0*sum(np.array(score)>1000)/len(np.array(score))))
out.append(('901-1000',(sum(np.array(score)>900)-sum(np.array(score)>1000)),100.0*(sum(np.array(score)>900)-sum(np.array(score)>1000))/len(np.array(score))))
out.append(('801-900',(sum(np.array(score)>800)-sum(np.array(score)>900)),100.0*(sum(np.array(score)>800)-sum(np.array(score)>900))/len(np.array(score))))
out.append(('701-800',(sum(np.array(score)>700)-sum(np.array(score)>800)),100.0*(sum(np.array(score)>700)-sum(np.array(score)>800))/len(np.array(score))))
out.append(('601-700',(sum(np.array(score)>600)-sum(np.array(score)>700)),100.0*(sum(np.array(score)>600)-sum(np.array(score)>700))/len(np.array(score))))
out.append(('501-600',(sum(np.array(score)>500)-sum(np.array(score)>600)),100.0*(sum(np.array(score)>500)-sum( | np.array(score) | numpy.array |
#!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import re
import numpy as np
from shapely.geometry import Polygon, LineString
from shapely.ops import polygonize, unary_union
def list_intersect(a, b):
""" return the intersection of two lists """
return list(set(a) & set(b))
def list_union(a, b):
""" return the union of two lists """
return list(set(a) | set(b))
def ply_parser(fp):
'''
:param fp: PLY file path
:return: Surface coordinates and surface index
'''
tf = open(fp)
lines = tf.readlines()
flag = 0
for l in lines:
if re.search("\s*element\s*vertex\s*\d*", l) is not None:
vertex_num = int(re.findall("\d+\.?\d*", l)[0])
if re.search("\s*element\s*face\s*\d*", l) is not None:
face_num = int(re.findall("\d+\.?\d*", l)[0])
if re.search("end_header", l) is not None:
begin_num = flag + 1
flag += 1
x = [float(re.findall("-*\d+\.?\d*", l)[0]) for l in lines[begin_num:begin_num + vertex_num]]
y = [float(re.findall("-*\d+\.?\d*", l)[1]) for l in lines[begin_num:begin_num + vertex_num]]
z = [float(re.findall("-*\d+\.?\d*", l)[2]) for l in lines[begin_num:begin_num + vertex_num]]
cor = [[x[i], y[i], z[i]] for i in range(0, len(x))]
cor = np.asarray(cor)
f = [re.findall("\d+\.?\d*", l)
for l in lines[begin_num + vertex_num:begin_num + vertex_num + face_num]]
return cor, f
def check_relation(plane1, plane2):
'''
Checking spatial relationship between planes.
:param plane1:
:param plane2:
:return: spatial relationship tag
'''
p1 = Polygon(plane1)
p2 = Polygon(plane2)
try:
if p1.intersects(p2):
if p1.contains(p2):
flag = 1
else:
if p1.area >= p2.area:
flag = 2
else:
flag = 3
else:
flag = 4
return flag
except: # noqa: E722
return 4
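# Illustrative sketch of the return codes (coordinates are made-up examples):
# >>> outer = [(0, 0), (4, 0), (4, 4), (0, 4)]
# >>> inner = [(1, 1), (2, 1), (2, 2), (1, 2)]
# >>> check_relation(outer, inner)  # 1: plane1 contains plane2
# >>> check_relation(inner, outer)  # 3: planes intersect and plane1 is the smaller one
# >>> check_relation(outer, [(10, 10), (11, 10), (11, 11)])  # 4: disjoint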
def get_height_from_dem(cor, dem_parameter):
'''
Get Z coordinates from the DEM based on given XY coordinates.
For coordinates that fall outside the DEM, the nearest boundary pixel (given by the index array r) is used.
:param cor: XY coordinates, ndarray of shape (n, 2)
:param dem_parameter: DEM parameters (xOrigin, yOrigin, pixelWidth, pixelHeight, data, r)
:return: Z coordinates
'''
xOrigin = dem_parameter[0]
yOrigin = dem_parameter[1]
pixelWidth = dem_parameter[2]
pixelHeight = dem_parameter[3]
data = dem_parameter[4]
r = dem_parameter[5]
base_height = []
for i in range(cor.shape[0]):
x = cor[i, 0]
y = cor[i, 1]
xOffset = int((x - xOrigin) / pixelWidth)
yOffset = int((y - yOrigin) / pixelHeight)
try:
value = data[yOffset][xOffset]
base_height.append(value)
except: # noqa: E722
dist_2 = np.sum((r - np.array([yOffset, xOffset])) ** 2, axis=1)
index = np.argmin(dist_2)
value = data[r[index, 0]][r[index, 1]]
base_height.append(value)
return np.array(base_height)
def get_height_from_lower_surface(plane1, plane2):
'''
:param plane1: Higher surface
:param plane2: Lower surface
:return: Z coordinate on lower surface
'''
[a, b, c, d] = fit_plane(plane1)
def z(x):
return -(a * x[0] + b * x[1] + d) / c
return z([plane2[:, 0], plane2[:, 1]])
def get_difference_plane(plane1, plane2):
'''
Get difference and intersection part for two planes
:param plane1:
:param plane2:
:return:
'''
try:
p1 = Polygon(plane1)
p2 = Polygon(plane2)
pd = p2.difference(p1)
pi = p2.intersection(p1)
flag = True
p3 = np.array(pd.exterior.coords[:])
p4 = np.array(pi.exterior.coords[:])
return [flag, p3, p4]
except: # noqa: E722
flag = False
p3 = None
p4 = None
return [flag, p3, p4]
def fit_plane(point):
'''
Using normal vector and distance to origin to represent a plane.
:param point: Plane coordinates
:return: Plane parameters
'''
xyz_mean = np.array([point[:, 0].mean(), point[:, 1].mean(), point[:, 2].mean()])
xyz_m = np.array(
[point[:, 0] - xyz_mean[0], point[:, 1] - xyz_mean[1], point[:, 2] - xyz_mean[2]])
[U, S, V] = np.linalg.svd(xyz_m)
v = np.array([U[0, 2], U[1, 2], U[2, 2]])
a = v[0]
b = v[1]
c = v[2]
d = - np.dot(v, xyz_mean.transpose())
# normal vector of plane
return [a, b, c, d]
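# Quick sanity sketch (synthetic points on the plane z = 2x + 3y + 1; values are illustrative):
# >>> pts = np.array([[x, y, 2*x + 3*y + 1] for x in range(3) for y in range(3)], dtype=float)
# >>> a, b, c, d = fit_plane(pts)
# >>> np.allclose(a*pts[:, 0] + b*pts[:, 1] + c*pts[:, 2] + d, 0)  # True for exactly planar points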
def rotate_plane(plane):
'''
Rotate a 3D plane into 2D plane.
:param plane:
:return: [2D plane coordinates, rotate tag(whether or not), rotation matrix, plane center]
'''
temp_cor = plane
p_n = fit_plane(temp_cor)
p_n = np.array(p_n[0:3])
s_n = np.array([0, 0, 1])
[rx, ry, rz] = np.cross(p_n, s_n)
ra = np.arccos(np.dot(p_n, s_n) / (np.linalg.norm(p_n) * np.linalg.norm(s_n)))
rotate_flag = False
rm = None
center = None
if abs(ra) > 0.001:
norm = np.linalg.norm(np.cross(p_n, s_n))
[rx, ry, rz] = [rx / norm, ry / norm, rz / norm]
r1 = [np.cos(ra) + rx ** 2 * (1 - np.cos(ra)), rx * ry * (1 - | np.cos(ra) | numpy.cos |
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
import control as ctrl
import seaborn as sns
import cmath
import warnings
sns.set_style('darkgrid')
warnings.filterwarnings("ignore")
s = ctrl.TransferFunction.s
Hnorm = (453622744.6*s**2)/(s**4 + s**3*32786.3706 + s**2*537594936.7 + s*1.318*10**12 + 1.617*10**15)
Horig = (s**2*(4.564*10**8))/(s**4+s**3*(3.288*10**4) + s**2*(5.405*10**8)+s*(1.324*10**12)+1.622*10**15)
# step response
# step response plot
sns.set_style('darkgrid')
plt.figure(figsize= (8,6))
t1, h1 = ctrl.step_response(Hnorm)
t2, h2 = ctrl.step_response(Horig)
sns.lineplot(x=1000*t2,y=h2, color='g', linestyle = 'dashdot', label = '$H_{assigned}$', linewidth = 2)
sns.lineplot(x=1000*t1,y=h1, color='r', linestyle = 'dotted', label = '$H_{normalized}$', linewidth = 1.5)
plt.xlabel('$t\quad [ms]$')
plt.ylabel("$v_{out}(t)\quad [V]$")
plt.title("Step response")
plt.legend(prop={'size': 15})
# impulse response
# impulse response plot
sns.set_style('darkgrid')
plt.figure(figsize= (8,6))
t1, h1 = ctrl.impulse_response(Hnorm)
t2, h2 = ctrl.impulse_response(Horig)
sns.lineplot(x=1000*t2,y=h2, color='g', linestyle = 'dashdot', label = '$H_{assigned}$', linewidth = 2)
sns.lineplot(x=1000*t1,y=h1, color='r', linestyle = 'dotted', label = '$H_{normalized}$', linewidth = 1.5)
plt.xlabel('$t\quad [ms]$')
plt.ylabel("$v_{out}(t)\quad [V]$")
plt.title("Impulse response")
plt.legend(prop={'size': 15})
# sine response
t = np.linspace(0, 7e-3, 1000, endpoint=False)
w1 = 1885.169112
u1 = | np.sin(w1 * t) | numpy.sin |
"""
Created on Dec 16 2021
@author: <NAME>
Poisson equation solver for the Hall effect.
Includes classes for Hall bars, Hall bars in a nonlocal geometry, and Corbino disks.
The Hall bar class has build in methods for longitudinal and Hall 4-probe resistance measurements.
Plotting functions assume coordinates are in microns, but the Poisson equation is scale-invariant.
"""
import time
import math
import numpy as np
import scipy.sparse as sp # import sparse matrix library
import matplotlib.pyplot as plt
from scipy.sparse.linalg import spsolve
# import the file where the differentiation matrix operators are defined
from diff_matrices import Diff_mat_1D, Diff_mat_2D
class hallbar():
"""The class for a Hall bar device
Source is the left terminal, drain is the right terminal.
Args:
Lx : length in x direction
Ly : length in y direction
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, Lx, Ly, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
self.x = np.linspace(0,self.Lx,self.Nx)
self.y = np.linspace(0,self.Ly,self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0])) # Left boundary
self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1])) # Right boundary
self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0])) # Bottom boundary
self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1])) # Top boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1]))) # outer boundaries 1D unravel indices
self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1])) # outer boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
BNx = self.Dx_2d / (2 * self.dx) # Neumann boundary operator for x component
BNy = self.Dy_2d / (2 * self.dy) # Neumann boundary operator for y component
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_unravel_L,:] = BD[self.ind_unravel_L,:] # Boundaries at the left layer
L_sys[self.ind_unravel_R,:] = BD[self.ind_unravel_R,:] # Boundaries at the right edges
# CURRENT THROUGH EDGES
L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:] # Boundaries at the top layer
L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:] # Boundaries at the bottom layer
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_L] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_R] = 0 # Dirichlet boundary condition at drain
g[self.ind_unravel_T] = 0 # No current through top
g[self.ind_unravel_B] = 0 # No current through bottom
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def voltage_measurement(self, x1, x2, side='top'):
# Args: x1 : point of V_A
# x2 : point of V_B
# side ('top', 'bottom', or 'hall') : which side of Hall bar to measure
# Returns: V_A - V_B
if np.all(self.u==0):
raise Exception('System has not been solved')
if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
raise Exception('Points out of bounds')
if side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif side=='bottom':
ya = 0
yb = 0
elif side=='hall':
ya = 0
yb = self.Ny-1
else:
raise Exception("Side must be 'top', 'bottom', or 'hall'")
# Find nearest index value to input coordinates
xa = np.searchsorted(self.x, x1, side='left')
xb = np.searchsorted(self.x, x2, side='left')
return self.u[xa, ya] - self.u[xb, yb]
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show()
def plot_resistance(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
r_top = (self.u[0:-1, -1] - self.u[1:, -1]) * 25812 * self.Ly / self.dx
r_bottom = (self.u[0:-1, 0] - self.u[1:, 0]) * 25812 * self.Ly / self.dx
rxx = 25812 / self.lmbda
fig = plt.figure(figsize = [8,5])
plt.plot(self.x[0:-1] - self.dx, r_top, 'r', label='top')
plt.plot(self.x[0:-1] - self.dx, r_bottom, 'b', label='bottom')
plt.hlines(rxx, self.x[0], self.x[-1], linestyle='dashed', color='grey', label=r'$\rho_{xx}$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'$\rho_{xx}$ $(\Omega)$');
plt.legend()
plt.ylim([0, 12000]);
plt.show()
def add_contact(self, contact):
if contact.x1 > self.Lx or contact.x2 > self.Lx:
raise Exception('Contact out of bounds')
self.contacts.append(contact)
def measure_contact_voltageonly(self, contact):
# Args: contact instance
# Returns: measured resistivity
# Voltage is averaged across voltage tap
# THIS FUNCTION DOES NOT CHECK THE CURRENT!
# This method assumes 2terminal resistance is h/e2, which in general is wrong
if | np.all(self.u==0) | numpy.all |
import math
import shutil
import faiss
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from tqdm import tqdm
def compute_features(eval_loader, model, args):
print('Computing features...')
model.eval()
features = torch.zeros(len(eval_loader.dataset), args.low_dim).cuda()
for i, (images, index) in enumerate(tqdm(eval_loader)):
with torch.no_grad():
images = images.cuda(non_blocking=True)
feat = model(images, is_eval=True)
features[index] = feat
dist.barrier()
dist.all_reduce(features, op=dist.ReduceOp.SUM)
return features.cpu()
def run_kmeans(x, args):
"""
Args:
x: data to be clustered
"""
print('performing kmeans clustering')
results = {'im2cluster': [], 'centroids': [], 'density': []}
for seed, num_cluster in enumerate(args.num_cluster):
# initialize faiss clustering parameters
d = x.shape[1]
k = int(num_cluster)
clus = faiss.Clustering(d, k)
clus.verbose = True
clus.niter = 20
clus.nredo = 5
clus.seed = seed
clus.max_points_per_centroid = 1000
clus.min_points_per_centroid = 10
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False
cfg.device = args.gpu
index = faiss.GpuIndexFlatL2(res, d, cfg)
clus.train(x, index)
D, I = index.search(x, 1) # for each sample, find cluster distance and assignments
im2cluster = [int(n[0]) for n in I]
# get cluster centroids
centroids = faiss.vector_to_array(clus.centroids).reshape(k, d)
# sample-to-centroid distances for each cluster
Dcluster = [[] for c in range(k)]
for im, i in enumerate(im2cluster):
Dcluster[i].append(D[im][0])
# concentration estimation (phi)
density = | np.zeros(k) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Nuclear Magnetic Resonance (NMR) dataset
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
# %%
# The following example is a :math:`^{29}\mathrm{Si}` NMR time-domain
# saturation recovery measurement of a highly siliceous zeolite ZSM-12.
# Usually, the spin recovery measurements are acquired over a rectilinear grid
# where the measurements along one of the dimensions are non-uniform and span several
# orders of magnitude. In this example, we illustrate the use of `monotonic`
# dimensions for describing such datasets.
#
# Let's load the file.
import csdmpy as cp
filename = "https://osu.box.com/shared/static/27yrgdaubtb4wqj5adbavp2u16c2h7k8.csdf"
NMR_2D_data = cp.load(filename)
print(NMR_2D_data.description)
# %%
# The tuples of the dimension and dependent variable instances from the
# ``NMR_2D_data`` instance are
x = NMR_2D_data.dimensions
y = NMR_2D_data.dependent_variables
# %%
# respectively. There are two dimension instances in this example with respective
# dimension data structures as
print(x[0].data_structure)
# %%
# and
print(x[1].data_structure)
# %%
# respectively. The first dimension is uniformly spaced, as indicated by the
# `linear` subtype, while the second dimension is non-linear and monotonically
# sampled. The coordinates along the respective dimensions are
x0 = x[0].coordinates
print(x0)
# %%
x1 = x[1].coordinates
print(x1)
# %%
# Notice, the unit of ``x0`` is in microseconds. It might be convenient to
# convert the unit to milliseconds. To do so, use the
# :meth:`~csdmpy.Dimension.to` method of the respective
# :ref:`dim_api` instance as follows,
x[0].to("ms")
x0 = x[0].coordinates
print(x0)
# %%
# As before, the components of the dependent variable are accessed using the
# :attr:`~csdmpy.DependentVariable.components` attribute.
y00 = y[0].components[0]
# %%
# **Visualize the dataset**
#
# The :meth:`~csdmpy.plot` method is a very basic supplementary function for
# quick visualization of 1D and 2D datasets. You may use this function to plot
# the data from this example, however, we use the following script to
# visualize the data with projections onto the respective dimensions.
# %%
import matplotlib.pyplot as plt
from matplotlib.image import NonUniformImage
import numpy as np
# Set the extents of the image.
# To set the independent variable coordinates at the center of each image
# pixel, subtract and add half the sampling interval from the first
# and the last coordinate, respectively, of the linearly sampled
# dimension, i.e., x0.
si = x[0].increment
extent = (
(x0[0] - 0.5 * si).to("ms").value,
(x0[-1] + 0.5 * si).to("ms").value,
x1[0].value,
x1[-1].value,
)
# Create a 2x2 subplot grid. The subplot at the lower-left corner is for
# the image intensity plot. The subplots at the top-left and bottom-right
# are for the data slice at the horizontal and vertical cross-section,
# respectively. The subplot at the top-right corner is empty.
fig, axi = plt.subplots(
2, 2, gridspec_kw={"width_ratios": [4, 1], "height_ratios": [1, 4]}
)
# The image subplot quadrant.
# Add an image over a rectilinear grid. Here, only the real part of the
# data values is used.
ax = axi[1, 0]
im = NonUniformImage(ax, interpolation="nearest", extent=extent, cmap="bone_r")
im.set_data(x0, x1, y00.real / y00.real.max())
# Add the colorbar and the component label.
cbar = fig.colorbar(im)
cbar.ax.set_ylabel(y[0].axis_label[0])
# Set up the grid lines.
ax.images.append(im)
for i in range(x1.size):
ax.plot(x0, | np.ones(x0.size) | numpy.ones |
# Simple script to plot variables. Will grow with time.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import argparse
parser = argparse.ArgumentParser(description='reading strings for plotting')
parser.add_argument('strings', metavar='ID', nargs='+', help='string to plot')
args = parser.parse_args()
print(args.strings)
#class for all variables with initial definitions
class params:
# Defining static / default values
xDim = 512
yDim = 512
# data_dir is assumed to be in the previous directory
data_dir = "data"
# Defaulting to first element after imaginary time evolution
start = 0
end = 1
incr = 1
# item to work with
item = "wfc"
# Function to plot specific variable
def plot_var(xDim, yDim, data_dir, pltval):
if data_dir[0] != "/":
data_dir = "../" + data_dir
data = data_dir + "/" + pltval
lines = np.loadtxt(data)
val = np.reshape(lines, (xDim,yDim))
'''
val = -np.log(val) * 1E4 * 1.0545718E-34
data_V = "../data/K_0"
lines_V = np.loadtxt(data_V)
V_val = np.reshape(lines_V, (xDim, yDim))
final_val = V_val - val
'''
plt.imshow(val, extent=(1,xDim,1,yDim), interpolation='nearest',
cmap = cm.jet)
plt.colorbar()
fig = plt.gcf()
#plt.clim(0,1)
plt.show()
# function to plot a variable with a range
def plot_var_range(xDim, yDim, data_dir, pltval, start, end, incr):
if data_dir[0] != "/":
data_dir = "../" + data_dir
for i in range(start, end, incr):
print(i)
output = pltval + "%s" % i
data = data_dir + "/" + output
lines = np.loadtxt(data)
val = np.reshape(lines, (xDim,yDim))
plt.imshow(val, extent=(1,xDim,1,yDim), interpolation='nearest',
cmap = cm.jet)
plt.colorbar()
fig = plt.gcf()
#plt.show()
plt.draw()
num_str = "%s" % i
output_str = pltval + num_str.rjust(5,'0') + ".png"
fig.savefig(output_str)
plt.clf()
# Function to plot wfc with pltvar as a variable to modify the type of plot
def plot_wfc(xDim, yDim, data_dir, pltval, start, end, incr):
if data_dir[0] != "/":
data_dir = "../" + data_dir
for i in range(start,end,incr):
print(i)
data_real = data_dir + "/wfc_0_const_%s" % i
data_im = data_dir + "/wfc_0_consti_%s" % i
if pltval == "wfc_ev":
data_real = data_dir + "/wfc_ev_%s" % i
data_im = data_dir + "/wfc_evi_%s" % i
#data_x = data_dir + "x_0" % i
#data_y = data_dir + "y_0" % i
#print(i)
lines_real = np.loadtxt(data_real)
lines_im = np.loadtxt(data_im)
wfc_real = np.reshape(lines_real, (xDim,yDim));
wfc_im = np.reshape(lines_im, (xDim,yDim));
wfc = abs(wfc_real + 1j * wfc_im)
wfc = wfc * wfc
#wfc_k = np.fft.fft2(wfc)
#wfc_k_plot = np.abs(np.fft.fftshift(wfc_k))
#wfc_k_plot = wfc_k_plot**2
plt.imshow(wfc, extent=(-6.9804018707623236e-04,6.9804018707623236e-04,-6.9804018707623236e-04,6.9804018707623236e-04), interpolation='nearest',
cmap = cm.jet)
plt.colorbar()
#plt.clim(0,1)
plt.show()
#fig = plt.figure()
#fig.savefig('wfc.png')
# Function to plot complex vals with pltvar as the variable
def plot_complex(xDim, yDim, data_dir, pltval, start, end, incr):
if data_dir[0] != "/":
data_dir = "../" + data_dir
data_real = data_dir + "/" + pltval + "_0"
data_im = data_dir + "/" + pltval + "i_0"
lines_real = np.loadtxt(data_real)
lines_im = np.loadtxt(data_im)
wfc_real = np.reshape(lines_real, (xDim,yDim));
wfc_im = np.reshape(lines_im, (xDim,yDim));
wfc = abs(wfc_real + 1j * wfc_im)
wfc = wfc * wfc
plt.imshow(wfc, extent=(1,xDim,1,yDim), interpolation='nearest',
cmap = cm.jet)
plt.colorbar()
plt.show()
#fig = plt.figure()
#fig.savefig('wfc.png')
# Function to plot wfc with pltvar as a variable to modify the type of plot
def plot_wfc_k(xDim, yDim, data_dir, pltval, start, end, incr):
if data_dir[0] != "/":
data_dir = "../" + data_dir
for i in range(start,end,incr):
print(i)
data_real = data_dir + "/wfc_0_const_%s" % i
data_im = data_dir + "/wfc_0_consti_%s" % i
if pltval == "wfc_k_ev":
data_real = data_dir + "/wfc_ev_%s" % i
data_im = data_dir + "/wfc_0_evi_%s" % i
lines_real = np.loadtxt(data_real)
lines_im = np.loadtxt(data_im)
wfc_real = | np.reshape(lines_real, (xDim,yDim)) | numpy.reshape |
import math
import srwlib
import numpy as np
from srwlib import *
def createGsnSrcSRW(sigrW,propLen,pulseE,poltype,phE=10e3,sampFact=15,mx=0,my=0):
"""
#sigrW: beam size at waist [m]
#propLen: propagation length [m] required by SRW to create numerical Gaussian
#pulseE: energy per pulse [J]
#poltype: polarization type (0=linear horizontal, 1=linear vertical, 2=linear 45 deg, 3=linear 135 deg, 4=circular right, 5=circular left, 6=total)
#phE: photon energy [eV]
#sampFact: sampling factor to increase mesh density
"""
constConvRad = 1.23984186e-06/(4*3.1415926536) ##conversion from energy to 1/wavelength
rmsAngDiv = constConvRad/(phE*sigrW) ##RMS angular divergence [rad]
sigrL=math.sqrt(sigrW**2+(propLen*rmsAngDiv)**2) ##required RMS size to produce requested RMS beam size after propagation by propLen
#***********Gaussian Beam Source
GsnBm = SRWLGsnBm() #Gaussian Beam structure (just parameters)
GsnBm.x = 0 #Transverse Positions of Gaussian Beam Center at Waist [m]
GsnBm.y = 0
GsnBm.z = propLen #Longitudinal Position of Waist [m]
GsnBm.xp = 0 #Average Angles of Gaussian Beam at Waist [rad]
GsnBm.yp = 0
GsnBm.avgPhotEn = phE #Photon Energy [eV]
GsnBm.pulseEn = pulseE #Energy per Pulse [J] - to be corrected
GsnBm.repRate = 1 #Rep. Rate [Hz] - to be corrected
GsnBm.polar = poltype #1- linear horizontal?
GsnBm.sigX = sigrW #Horiz. RMS size at Waist [m]
GsnBm.sigY = GsnBm.sigX #Vert. RMS size at Waist [m]
GsnBm.sigT = 10e-15 #Pulse duration [s] (not used?)
GsnBm.mx = mx #Transverse Gauss-Hermite Mode Orders
GsnBm.my = my
#***********Initial Wavefront
wfr = SRWLWfr() #Initial Electric Field Wavefront
wfr.allocate(1, 1000, 1000) #Numbers of points vs Photon Energy (1), Horizontal and Vertical Positions (dummy)
wfr.mesh.zStart = 0.0 #Longitudinal Position [m] at which initial Electric Field has to be calculated, i.e. the position of the first optical element
wfr.mesh.eStart = GsnBm.avgPhotEn #Initial Photon Energy [eV]
wfr.mesh.eFin = GsnBm.avgPhotEn #Final Photon Energy [eV]
wfr.unitElFld = 1 #Electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)
distSrc = wfr.mesh.zStart - GsnBm.z
#Horizontal and Vertical Position Range for the Initial Wavefront calculation
#can be used to simulate the First Aperture (of M1)
#firstHorAp = 8.*rmsAngDiv*distSrc #[m]
xAp = 8.*sigrL
yAp = xAp #[m]
wfr.mesh.xStart = -0.5*xAp #Initial Horizontal Position [m]
wfr.mesh.xFin = 0.5*xAp #Final Horizontal Position [m]
wfr.mesh.yStart = -0.5*yAp #Initial Vertical Position [m]
wfr.mesh.yFin = 0.5*yAp #Final Vertical Position [m]
sampFactNxNyForProp = sampFact #sampling factor for adjusting nx, ny (effective if > 0)
arPrecPar = [sampFactNxNyForProp]
srwl.CalcElecFieldGaussian(wfr, GsnBm, arPrecPar)
##Beamline to propagate to waist
optDriftW=SRWLOptD(propLen)
propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
optBLW = SRWLOptC([optDriftW],[propagParDrift])
#wfrW=deepcopy(wfr)
srwl.PropagElecField(wfr, optBLW)
return wfr
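# Hedged usage sketch (parameter values are illustrative only):
# >>> wfr0 = createGsnSrcSRW(sigrW=30e-6, propLen=10., pulseE=1e-3, poltype=1, phE=10e3, sampFact=15)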
def createDriftLensBL2(Length,f):
"""
#Create beamline for propagation from end of crystal to end of cavity and through lens (representing a mirror)
#First propagate by Length, then through lens with focal length f
#Length: drift length [m]
#f: focal length
"""
#f=Lc/4 + df
optDrift=SRWLOptD(Length)
optLens = SRWLOptL(f, f)
propagParLens = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
DriftLensBL = SRWLOptC([optDrift,optLens],[propagParDrift,propagParLens])
return DriftLensBL
def createDriftLensBL(Lc,df):
"""
#Create beamline for propagation from center of cell to end and through lens (representing a mirror)
#First propagate Lc/2, then through lens with focal length Lc/2 + df
#Lc: cavity length [m]
#df: focusing error
"""
f=Lc/4 + df
optDrift=SRWLOptD(Lc/2)
optLens = SRWLOptL(f, f)
propagParLens = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
propagParDrift = [0, 0, 1., 1, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
DriftLensBL = SRWLOptC([optDrift,optLens],[propagParDrift,propagParLens])
return DriftLensBL
def createDriftBL(Lc):
"""
#Create drift beamline container that propagates the wavefront through half the cavity
#Lc is the length of the cavity
"""
optDrift=SRWLOptD(Lc/2)
propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
DriftBL = SRWLOptC([optDrift],[propagParDrift])
return DriftBL
def createBL1to1(L,dfof=0):
"""
##Define beamline geometric variables.
#L: drift length before and after lens
#dfof: focal length variation factor (=0 for no variation; can be positive or negative)
"""
##Drift lengths between elements beginning with source to 1st crystal and ending with last crystal to start of undulator.
##focal length in meters
f=(L/2)*(1+dfof)
#Lens
optLens = SRWLOptL(f, f)
#Drift spaces
optDrift1=SRWLOptD(L)
optDrift2=SRWLOptD(L)
#***********Wavefront Propagation Parameters:
#[0]: Auto-Resize (1) or not (0) Before propagation
#[1]: Auto-Resize (1) or not (0) After propagation
#[2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[3] Type of the propagator:
#0 - Standard - Fresnel (it uses two FFTs);
#1 - Quadratic Term - with semi-analytical treatment of the quadratic (leading) phase terms (it uses two FFTs);
#2 - Quadratic Term - Special - special case;
#3 - From Waist - good for propagation from "waist" over a large distance (it uses one FFT);
#4 - To Waist - good for propagation to a "waist" (e.g. some 2D focus of an optical system) over some distance (it uses one FFT).
#[4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[6]: Horizontal Resolution modification factor at Resizing
#[7]: Vertical Range modification factor at Resizing
#[8]: Vertical Resolution modification factor at Resizing
#[9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#propagParLens = [0, 0, 1., 0, 0, 1., 1.5, 1., 1.5, 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
##Beamline construction
optBL1to1 = SRWLOptC([optDrift1,optLens,optDrift2],[propagParDrift,propagParLens,propagParDrift])
return optBL1to1
def createReflectionOffFocusingMirrorBL(L,f,strDataFolderName,strMirSurfHeightErrInFileName):
"""
#Create an SRW beamline container that will propagate a length L
#then reflect off a flat mirror followed by a lens. Finally, propagate by L again.
#L: length of propagation [m]
#f: focal length of mirror [m]
#strDataFolderName: Folder name where mirror data file is
#strMirSurfHeightErrInFileName: File name for mirror slope error file
#Assuming waist to waist propagation, we want f~L/2 (Note that this isn't a perfect identity
#map in phase space due to the Rayleigh length of the mode)
"""
#Drift
optDrift1=SRWLOptD(L)
#Transmission element to simulate mirror slope error
#angM1 = np.pi #Incident Angle of M1 [rad] ( 1.8e-3 in Ex. 9 )
#angM1 = 3.14 #Incident Angle of M1 [rad]
angM1 = 1.e-2
heightProfData = srwl_uti_read_data_cols(os.path.join(os.getcwd(), strDataFolderName, strMirSurfHeightErrInFileName), _str_sep='\t', _i_col_start=0, _i_col_end=1)
opTrErM1 = srwl_opt_setup_surf_height_1d(heightProfData, _dim='y', _ang=angM1, _amp_coef=1) #_amp_coef=1e4
#print(' Saving optical path difference data to file (for viewing/debugging) ... ', end='')
#opPathDifErM1 = opTrErM1.get_data(3, 3)
#srwl_uti_save_intens_ascii(opPathDifErM1, opTrErM1.mesh, os.path.join(os.getcwd(), strDataFolderName, strMirOptPathDifOutFileName01), 0,
# ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Diff.'], _arUnits=['', 'm', 'm', 'm'])
#Lens
optLens = SRWLOptL(f, f)
#Propagation parameters
propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParLens = [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
prPar0 = [0, 0, 1., 1, 0, 1., 1., 1., 1., 0, 0, 0]
#Construct beamline
optBL = SRWLOptC([optDrift1,opTrErM1,optLens,optDrift1],[propagParDrift,prPar0,propagParLens,propagParDrift])
#optBL = SRWLOptC([optDrift1,optLens,optDrift1],[propagParDrift,propagParLens,propagParDrift])
return optBL
def createABCDbeamline(A,B,C,D):
"""
#Use decomposition of ABCD matrix into kick-drift-kick Pei-Huang 2017 (https://arxiv.org/abs/1709.06222)
#Construct corresponding SRW beamline container object
#A,B,C,D are 2x2 matrix components.
"""
f1= B/(1-A)
L = B
f2 = B/(1-D)
optLens1 = SRWLOptL(f1, f1)
optDrift=SRWLOptD(L)
optLens2 = SRWLOptL(f2, f2)
propagParLens1 = [0, 0, 1., 0, 0, 1, 1, 1, 1, 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1, 1, 1, 1, 0, 0, 0]
propagParLens2 = [0, 0, 1., 0, 0, 1, 1, 1, 1, 0, 0, 0]
optBL = SRWLOptC([optLens1,optDrift,optLens2],[propagParLens1,propagParDrift,propagParLens2])
return optBL
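# Consistency sketch for the kick-drift-kick decomposition above (assumes B != 0, A != 1, D != 1
# so both focal lengths are finite; the numbers are an arbitrary unimodular example):
# >>> A, B, C, D = 0.5, 2.0, -0.375, 0.5  # any matrix with A*D - B*C = 1
# >>> f1, L, f2 = B/(1 - A), B, B/(1 - D)
# >>> M = np.array([[1, 0], [-1/f2, 1]]) @ np.array([[1, L], [0, 1]]) @ np.array([[1, 0], [-1/f1, 1]])
# >>> np.allclose(M, [[A, B], [C, D]])  # True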
def createCrystal(n0,n2,L_cryst):
"""
#Create a set of optical elements representing a crystal.
#Treat as an optical duct
#ABCD matrix found here: https://www.rp-photonics.com/abcd_matrix.html
#n(r) = n0 - 0.5 n2 r^2
#n0: Index of refraction along the optical axis
#n2: radial variation of index of refraction
"""
if n2==0:
optBL=createDriftBL(2*L_cryst) #Note that this drift function divides length by 2
#print("L_cryst/n0=",L_cryst/n0)
else:
gamma = np.sqrt(n2/n0)
A = np.cos(gamma*L_cryst)
B = (1/(gamma))*np.sin(gamma*L_cryst)
C = -gamma*np.sin(gamma*L_cryst)
D = np.cos(gamma*L_cryst)
optBL=createABCDbeamline(A,B,C,D)
return optBL
def rmsWavefrontIntensity(wfr):
"""
#Compute rms values from a wavefront object
"""
IntensityArray2D = array('f', [0]*wfr.mesh.nx*wfr.mesh.ny) #"flat" array to take 2D intensity data
srwlib.srwl.CalcIntFromElecField(IntensityArray2D, wfr, 6, 0, 3, wfr.mesh.eStart, 0, 0) #extracts intensity
##Reshaping electric field data from flat to 2D array
IntensityArray2D = np.array(IntensityArray2D).reshape((wfr.mesh.nx, wfr.mesh.ny), order='C')
xvals= | np.linspace(wfr.mesh.xStart,wfr.mesh.xFin,wfr.mesh.nx) | numpy.linspace |
import pretty_midi as pm
import numpy as np
from collections import Counter
class OneHotEncoder:
def __init__(self, depth, axis=-1):
self.depth = depth
self.axis = axis
def _onehot(self, data):
oh = np.zeros((self.depth), dtype=np.uint8)
if data >= 0 and data < self.depth:
data = int(data)
oh[data] = 1
return oh
def transform(self, data_list):
one_hot_encoded = [self._onehot(data) for data in data_list]
one_hot_encoded = np.stack(one_hot_encoded, axis=0)
return one_hot_encoded
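# Minimal usage sketch (values illustrative):
# >>> ohe = OneHotEncoder(depth=4)
# >>> ohe.transform([0, 2, 3])  # uint8 array of shape (3, 4) with one-hot rows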
def build_dataset(song_path, include_velocity=True, augment=range(1)):
mid = pm.PrettyMIDI(song_path)
# get time signature
numerators = [t.numerator for t in mid.time_signature_changes]
denominators = [t.denominator for t in mid.time_signature_changes]
count = Counter(numerators)
numerator = sorted(numerators, key=lambda x: count[x], reverse=True)[0]
count = Counter(denominators)
denominator = sorted(denominators, key=lambda x: count[x], reverse=True)[0]
# extract all notes from non-drum instruments
midi_note = []
for ins in mid.instruments:
if not ins.is_drum:
for n in ins.notes:
midi_note.append((n.pitch, n.start, n.end, n.velocity))
midi_note = sorted(midi_note, key=lambda x: (x[1], x[0]))
# create features [pitch, velocity, dt, duration, start_time]
prev_start = 0
song = []
for m in midi_note:
t = mid.time_to_tick(m[1]) - prev_start
pitch = m[0]
song.append((np.clip(pitch, 21, 108), m[3], t/mid.resolution, mid.time_to_tick(m[2]-m[1])/mid.resolution, m[1]))
prev_start = mid.time_to_tick(m[1])
# create list of non-overlapping segments of 4 bars
time_per_bar = mid.tick_to_time(numerator * mid.resolution * (4/denominator))
total_bars = int((mid.get_end_time()//time_per_bar))
bars = []
tmp = []
for i in range(0, total_bars, 4):
for m in song:
if m[-1] >= (i*time_per_bar) and m[-1] < ((i*time_per_bar) + (time_per_bar*4)):
tmp.append(m[:-1])
bars.append(tmp)
tmp = []
# keep only segments that have more than 5 note events
bars = [np.stack(b) for b in bars if len(b)>= 5]
p_ohe = OneHotEncoder(89)
t_ohe = OneHotEncoder(33)
X = []
for bb in bars:
b = np.split(bb, 4, -1)
b = [np.squeeze(i) for i in b]
for i in augment:
P, V, D, R = [], [], [], []
p = p_ohe.transform(b[0]-21+i)
d = np.minimum(np.round((b[2]/4) * 32), 32)
d = t_ohe.transform(d)
r = np.minimum(np.round((b[3]/4) * 32), 32)
r = t_ohe.transform(r)
v = (b[1] / 63.5) - 1
v = v.astype(np.float32)
P.append(p)
V.append(v)
D.append(d)
R.append(r)
P = np.concatenate(P, axis=0)
V = np.expand_dims(np.concatenate(V, axis=0), -1)
D = np.concatenate(D, axis=0)
R = np.concatenate(R, axis=0)
if include_velocity:
tmp = np.concatenate([P, D, R, V], -1)
END_TOKEN = np.zeros(dtype=np.float32, shape=(1, tmp.shape[-1]))
END_TOKEN[0, 88] = 1.0
                tmp = np.concatenate([tmp, END_TOKEN], 0)
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import zscore
from sklearn.decomposition import PCA
import pandas as pd
from itertools import combinations
# Load helper function(s) for interacting with CTF dataset
from ctf_dataset.load import create_wrapped_dataset
base_dir = '/mnt/bucket/labs/hasson/snastase/social-ctf'
data_dir = join(base_dir, 'data')
# Create wrapped CTF dataset
wrap_f = create_wrapped_dataset(data_dir, output_dataset_name="virtual.hdf5")
n_lstms = 512
n_repeats = 8
n_players = 4
map_id = 0
# Get matchups with all same agents (e.g. AA vs AA)
agent_ids = wrap_f['map/matchup/repeat/player/agent_id'][0, :, :, :, 0]
matchup_ids = np.all(agent_ids[:, 0, :] ==
agent_ids[:, 0, 0][:, np.newaxis], axis=1)
n_matchups = np.sum(matchup_ids) # 0, 34, 49, 54
# Extract LSTMs for one map and matchup
lstms_matched = np.tanh(wrap_f['map/matchup/repeat/player/time/lstm'][
map_id, matchup_ids, ...].astype(np.float32))
print("Loaded LSTMs for within-population matchups")
# Loop through matchups, repeats, and players to compute PCA
k = n_lstms
lstm_pca = {}
for m in np.arange(n_matchups):
lstm_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pca[m][r] = {}
for p in np.arange(n_players):
lstm_pca[m][r][p] = {}
pca = PCA(n_components=k)
transformed = pca.fit_transform(
#zscore(lstms_matched[m, r, p], axis=0))
#np.tanh(lstms_matched[m, r, p]))
zscore(lstms_matched[m, r, p], axis=0))
lstm_pca[m][r][p]['transformed'] = transformed
lstm_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, player {p}")
np.save('results/pca_lstm_tanh-z_results.npy', lstm_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_pca_long = {'population': [], 'repeat': [], 'player': [],
'variance explained': [], 'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for k, v in enumerate(lstm_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pca_long['population'].append(pops[m])
lstm_pca_long['repeat'].append(r)
lstm_pca_long['player'].append(p)
lstm_pca_long['variance explained'].append(v)
lstm_pca_long['dimension'].append(k + 1)
lstm_pca_long = pd.DataFrame(lstm_pca_long)
max_k = 30
lstm_pca_trunc = lstm_pca_long[lstm_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pca_trunc, x='dimension',
y='variance explained', hue='repeat',
col='population', col_wrap=2,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_players, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, ..., i]))
min = int(np.amin(percents_vaf[m, ..., i]))
max = int(np.amax(percents_vaf[m, ..., i]))
print(f"Population {pops[m]}: {median} dimensions "
f"for {perc} variance (range: {min}-{max})")
print('\n')
# Stack pairs of players and compute joint PCA
pairs = list(combinations(np.arange(n_players), 2))
n_pairs = len(pairs)
k = n_lstms * 2
coop_ids, comp_ids = [0, 5], [1, 2, 3, 4]
lstm_pair_pca = {}
for m in np.arange(n_matchups):
lstm_pair_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pair_pca[m][r] = {}
for p, pair in enumerate(pairs):
lstm_pair_pca[m][r][p] = {}
stack_lstm = np.hstack((lstms_matched[m, r, pair[0]],
lstms_matched[m, r, pair[1]]))
pca = PCA(n_components=k)
transformed = pca.fit_transform(
zscore(stack_lstm, axis=0))
lstm_pair_pca[m][r][p]['transformed'] = transformed
lstm_pair_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, pair {pair}")
np.save('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_pair_pca_long = {'population': [], 'repeat': [], 'pair': [],
'variance explained': [], 'dimension': [],
'type': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
pair_type = {c:('cooperative' if c in coop_ids else 'competitive')
for c in np.arange(n_pairs)}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for k, v in enumerate(lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pair_pca_long['population'].append(pops[m])
lstm_pair_pca_long['repeat'].append(r)
lstm_pair_pca_long['pair'].append(p)
lstm_pair_pca_long['variance explained'].append(v)
lstm_pair_pca_long['dimension'].append(k + 1)
lstm_pair_pca_long['type'].append(pair_type[p])
lstm_pair_pca_long = pd.DataFrame(lstm_pair_pca_long)
max_k = 10
lstm_pair_pca_trunc = lstm_pair_pca_long[
lstm_pair_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pair_pca_trunc, x='dimension',
y='variance explained', hue='type',
col='population', col_wrap=2, linewidth=3,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_pairs, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
for type, c in zip(['cooperative', 'competitive'],
[coop_ids, comp_ids]):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, :, c, i]))
min = int(np.amin(percents_vaf[m, :, c, i]))
            max = int(np.amax(percents_vaf[m, :, c, i]))
import collections
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from realcomp.task import config
from torch.distributions import Normal
# from matplotlib import pyplot as plt
class ModelActor(nn.Module):
def __init__(self,
size_obs=13,
num_actions=9, # Our robot : 9 angles
size_layers=[32, 32],
log_std=0.,
lr=3e-4):
super(ModelActor, self).__init__()
self.layers = nn.ModuleList()
num_hidden = len(size_layers)
self.layers.append(nn.Linear(size_obs, size_layers[0]))
self.layers.append(nn.ReLU())
for i in range(num_hidden - 1):
self.layers.append(nn.Linear(size_layers[i], size_layers[i + 1]))
self.layers.append(nn.ReLU())
self.layers.append(nn.Linear(size_layers[num_hidden - 1], num_actions))
self.log_std = nn.Parameter(torch.ones(1, num_actions) * log_std)
self.num_actions = num_actions
def forward(self, x):
for layer in self.layers:
x = layer(x)
mu = x # Might need to rescale it ? tanh ?
std = self.log_std.exp()
if mu.dim() > 1:
std = std.expand_as(mu)
#mu = torch.tanh(mu) * np.pi / 2
dist = Normal(mu, std, validate_args=True)
return dist
class ModelCritic(nn.Module):
def __init__(self,
size_obs=13,
size_layers=[32, 32],
lr=3e-3):
super(ModelCritic, self).__init__()
self.layers = nn.ModuleList()
num_hidden = len(size_layers)
self.layers.append(nn.Linear(size_obs, size_layers[0]))
self.layers.append(nn.ReLU())
for i in range(num_hidden - 1):
self.layers.append(nn.Linear(size_layers[i], size_layers[i + 1]))
self.layers.append(nn.ReLU())
self.layers.append(nn.Linear(size_layers[num_hidden - 1], 1))
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class CNN(nn.Module):
def __init__(self, shape_pic=(96, 144, 3), size_output=256):
super(CNN, self).__init__()
self.size_output = size_output
self.layer1 = nn.Sequential(
nn.Conv2d(shape_pic[2], 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer3 = nn.Sequential(
nn.Conv2d(16, 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
size_h = self.size_after_conv(shape_pic[0], 5, 1, 2) #Layer 1 Conv layer
size_h = self.size_after_conv(size_h, 2, 2, 0) #MaxPool of Layer 1
size_h = self.size_after_conv(size_h, 5, 1, 2)
size_h = self.size_after_conv(size_h, 2, 2, 0)
size_h = self.size_after_conv(size_h, 5, 1, 2)
size_h = self.size_after_conv(size_h, 2, 2, 0)
size_w = self.size_after_conv(shape_pic[1], 5, 1, 2)
size_w = self.size_after_conv(size_w, 2, 2, 0)
size_w = self.size_after_conv(size_w, 5, 1, 2)
size_w = self.size_after_conv(size_w, 2, 2, 0)
size_w = self.size_after_conv(size_w, 5, 1, 2)
size_w = self.size_after_conv(size_w, 2, 2, 0)
self.fc = nn.Sequential(
nn.Linear(size_h * size_w * 16, 256),
nn.ReLU())
self.output = nn.Sequential(
nn.Linear(256, size_output))
#nn.Linear(256, size_output),
#nn.ReLU())
# self.optimizer = optim.Adam(self.parameters())
def size_after_conv(self, init_size, kernel_size, stride, padding, dilation=1):
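        # Standard conv/pool output-size formula:
        # floor((n + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1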
return int((init_size + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)
def forward(self, x):
# img = x[1].numpy()
# img = np.transpose(img, (1, 2, 0))
# plt.imshow(img)
# plt.show()
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = torch.flatten(x, 1)
x = self.fc(x)
x = self.output(x)
return x
class PPOAgent:
def __init__(self,
action_space,
size_obs=13 * config.observations_to_stack,
shape_pic=(96, 144, 3),
size_goal=0,
size_layers=[32, 32],
size_cnn_output=256,
actor_lr=1e-4,
critic_lr=1e-3,
value_loss_coeff=0.1,
gamma=0.99,
gae_lambda=0.95,
epochs=10,
horizon=64,
mini_batch_size=16,
frames_per_action=30,
init_wait=300,
clip=0.2,
entropy_coeff=0.1,
log_std=-0.6,
use_parallel=False,
num_parallel=0,
logs=False,
logs_dir=""):
"""
A controller for any continuous task, using PPO algorithm.
Parameters :
action_space : gym.action_space. Action space of a given environment.
size_obs : Int. Number of elements per observation.
shape_pic : Int triplet. Shape of the (potentially downscaled) input image. The last element is
the number of channels (3 if RGB, 1 if grayscale). If no picture is given as input, shape_pic should be
None.
size_goal : Int. Number of elements per goal.
size_layers : List of int. List of the number of neurons of each hidden layer of the neural network.
The first layer (input) and the last layer (output) must not be part of this list.
size_cnn_output : Int. Size of the output of the CNN, which ends with a fully connected linear layer.
actor_lr : Float. Learning rate for the actor network.
critic_lr : Float. Learning rate for the critic network.
value_loss_coeff : Float. Coefficient of the Critic loss compared to the actor loss when backpropagating
through the common part of the network. The final loss function is :
Loss = Policy_loss + value_loss_coeff * Value_loss
gamma : Float. Discount rate when computing the discounted returns with GAE.
gae_lambda : Float. 'Lambda' of the GAE algorithm, used to regulate the bias-variance trade-off.
epochs : Int. Number of epochs in each update.
horizon : Int. Number of actions taken before each update of the networks.
mini_batch_size : Int. Size of each batch when updating the networks.
frames_per_action : Int. Number of times each action must be repeated. Useful when using environments where
each action has to be taken for more than one frame, or when computing an action with the policy network is
really expensive.
init_wait : Int. Number of frames spent without taking any action. Useful when using environments
in which the first frames are irrelevant, or to wait for everything in the environment to stabilize.
clip : Float. Clipping parameter for the PPO algorithm.
entropy_coeff : Float. Entropy coefficient for the PPO algorithm, used to compute the actor's loss.
log_std : Float. Log of the initial standard deviation used to help the exploration of the actor
network.
use_parallel : Bool. If you are using vectorized environments, set this to True, in order to reshape
all the Tensors accordingly. Otherwise, set it to False.
num_parallel : Int. Number of parallel workers. Used to reshape correctly the actions. If use_parallel is
False, this argument has no effect.
logs : Bool. If True, a tensorboardX writer will save relevant informations about the training.
logs_dir : Str. Comment appended to the name of the directory of the tensorboardX output.
"""
self.num_actions = action_space.shape[0]
self.size_obs = size_obs
self.shape_pic = shape_pic
if shape_pic is None:
size_cnn_output = 0
self.size_goal = size_goal
self.first_step = True
self.device = config.device
self.use_parallel = use_parallel
self.num_parallel = num_parallel
# Hyperparameters
self.actor_lr = actor_lr
self.critic_lr = critic_lr
self.value_loss_coeff = value_loss_coeff
self.gamma = gamma
self.gae_lambda = gae_lambda
self.epochs = epochs
self.horizon = horizon
self.mini_batch_size = mini_batch_size
self.clip = clip
self.entropy_coeff = entropy_coeff
self.frames_per_action = frames_per_action
# Models
if shape_pic is not None:
self.cnn = CNN(shape_pic=shape_pic, size_output=size_cnn_output).to(config.device)
self.actor = ModelActor(self.size_obs + size_cnn_output, self.num_actions, size_layers=size_layers, lr=actor_lr, log_std=log_std).to(config.device)
self.critic = ModelCritic(self.size_obs + size_cnn_output, size_layers=size_layers, lr=critic_lr).to(config.device)
self.observations_history = collections.deque(maxlen=config.observations_to_stack)
# Pseudo-memory to be able to update the policy
self.frame = 0 # Used to know where we are compared to the horizon
self.state = None
self.states = []
self.actions = []
self.log_probas = []
self.rewards = []
self.values = []
self.not_done = []
self.action_to_repeat = None
self.num_repeated_action = self.frames_per_action # 'trick' so everything works even at the first step
self.init_wait = init_wait
self.already_waited = 0
# Meta-variable to get some information about the training
self.number_updates = 0
self.logs = logs
if logs:
# self.writer = UtilsTensorboard.writer(logs_dir)
self.writer = config.tensorboard
params = list(self.actor.parameters()) + list(self.critic.parameters())
if self.shape_pic:
params += list(self.cnn.parameters())
self.optimizer = optim.Adam(params=params, lr=config.lr)
######################################################################
# Utility functions
def convert_observation_to_input(self, observation):
# retina = torch.FloatTensor(observation["retina"])
# retina = torch.reshape(retina, (-1, 3, 240, 320))
# goal = torch.FloatTensor(observation["goal"])
# goal = torch.reshape(goal, (-1, 3, 240, 320))
# x = torch.cat((retina, goal))
# x = x.reshape(240 * 320 * 2 * 3)
###################
# To work with joints
if self.use_parallel:
list_obs = []
for obs in observation:
joints = torch.FloatTensor(obs["joint_positions"])
sensors = torch.FloatTensor(obs["touch_sensors"])
curr_obs = torch.cat((joints, sensors)).unsqueeze(0)
list_obs.append(curr_obs)
x = torch.cat(list_obs)
else:
joints = torch.FloatTensor(observation["joint_positions"])
sensors = torch.FloatTensor(observation["touch_sensors"])
x = torch.cat((joints, sensors))
return x
def compute_reward(self, observation): # "Observation" is supposed to contain the goal in itself
retina = torch.FloatTensor(observation["retina"])
goal = torch.FloatTensor(observation["goal"])
###TODO
def save_models(self, path):
torch.save({
"model_actor": self.actor.state_dict(),
"model_critic": self.critic.state_dict(),
"optim_actor": self.actor.optimizer.state_dict(),
"optim_critic": self.critic.optimizer.state_dict()
}, path)
def load_models(self, path):
checkpoint = torch.load(path)
self.actor.load_state_dict(checkpoint["model_actor"])
self.critic.load_state_dict(checkpoint["model_critic"])
self.actor.optimizer.load_state_dict(checkpoint["optim_actor"])
self.critic.optimizer.load_state_dict(checkpoint["optim_critic"])
# self.actor.eval()
# self.critic.eval()
def soft_reset(self):
self.frame = 0 # Used to know where we are compared to the horizon
self.state = None
self.states = []
self.actions = []
self.log_probas = []
self.rewards = []
self.values = []
self.not_done = []
self.action_to_repeat = None
self.num_repeated_action = self.frames_per_action # 'trick' so everything works even at the first step
self.already_waited = 0
self.first_step = True
######################################################################
# Functions used by the PPO algorithm in itself
def compute_returns_gae(self, next_value):
values = self.values + [next_value] # Can't simply append, as it would modify external values
advantage = 0
returns = []
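        # Generalized Advantage Estimation, computed backwards over the rollout:
        #   delta_t = r_t + gamma * V(s_{t+1}) * not_done - V(s_t)
        #   A_t     = delta_t + gamma * lambda * not_done * A_{t+1}
        # The critic targets returned here are A_t + V(s_t).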
for step in reversed(range(len(self.rewards))):
delta = self.rewards[step] + self.gamma * values[step + 1] * self.not_done[step] - values[step]
advantage = delta + self.gamma * self.gae_lambda * self.not_done[step] * advantage
returns.insert(0, advantage + values[step])
return returns
def ppo_iterator(self, mini_batch_size, states, actions, log_probas, returns, advantages):
n_states = states.size(0)
        possible_indices = np.arange(n_states)
import glob
import tensorflow as tf
import numpy as np
import re
import mnist_base as m_b
import input_data
import util_algebra as lalg
import util_logging as ul
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class PlotNNImages(object):
def __init__(self, source_folder, target_folder):
self.BATCH_SIZE = 256
self.cur_batch=0
# Read the data
self.mnist_source = input_data.read_data_sets(source_folder, one_hot=True)
self.mnist_target = input_data.read_data_sets(target_folder, one_hot=True)
def plot_all_nn_images(self):
existing_files = glob.glob('nn_list_a*.npy')
for file_n in existing_files:
after_name = file_n
before_name = re.sub('list_a','list_b', after_name)
batch_id = int(re.sub('.npy','',re.sub('nn_list_a','',after_name)))
after_nns = np.load(after_name)
before_nns = np.load(before_name)
final_im = np.zeros((5,28*3))
diff_im = np.zeros((5,28*3))
for j in range(self.BATCH_SIZE):
target_id = batch_id*self.BATCH_SIZE+j
source_id_after = after_nns[j]
source_id_before = before_nns[j]
row_im = np.concatenate( (self.mnist_target.train.images[target_id].reshape((28, 28)),
self.mnist_source.train.images[source_id_before].reshape((28, 28)),
self.mnist_source.train.images[source_id_after].reshape((28, 28)))
, axis=1 )
final_im = np.concatenate((final_im, row_im), axis=0)
if not source_id_after == source_id_before:
                    diff_im = np.concatenate((diff_im, row_im), axis=0)
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.partial_infer.eltwise import eltwise_infer
from mo.middle.passes.fusing.resnet_optimization import stride_optimization
from mo.ops.convolution import Convolution
from mo.ops.pooling import Pooling
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
max_elt_lambda = lambda node: eltwise_infer(node, lambda a, b: np.maximum(a, b))
nodes_attributes = {
# Placeholders
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Concat1 operation
'eltwise_1': {'type': 'Maximum', 'kind': 'op', 'op': 'Maximum', 'infer': max_elt_lambda},
'eltwise_1_data': {'name': 'eltwise_1_data', 'value': None, 'shape': None, 'kind': 'data'},
# Convolutions
'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_1_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_2_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_3': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_3_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_3_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_3_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_4': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_4_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_4_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_4_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_5': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_5_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_5_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_5_data': {'value': None, 'shape': None, 'kind': 'data'},
# ReLU
'relu_1': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'relu_2': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'relu_3': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Pooling
'pool_1': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling',
'spatial_dims': np.array([2, 3]),
'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'infer': Pooling.infer},
'pool_1_data': {'value': None, 'shape': None, 'kind': 'data'},
}
# In description of unit tests below will be used next syntax: Operation(NxM,XxY), where NxM - kernel size, XxY - stride
class ResnetOptimizationTests(unittest.TestCase):
# Pl->Conv(1x1,1x1)->Conv(1x1,2x2) => Pl->Conv(1x1,2x2)->Conv(1x1,1x1)
def test_resnet_optimization_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl->Conv(3x3,2x2)->Conv(1x1,2x2) => Pl->Conv(3x3,4x4)->Conv(1x1,1x1)
def test_resnet_optimization_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 4, 4]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 56, 56])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl->Conv(3x3,2x2)->Conv(3x3,2x2) => Same
def test_resnet_optimization_3(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl--->Conv(3x3,2x2)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(3x3,4x4)->ReLU--->Eltwise-->Conv(1x1,1x1)
# `-->Conv(3x3,2x2)->ReLU---` `-->Conv(3x3,4x4)->ReLU---`
def test_resnet_optimization_4(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'relu_1'),
('relu_1', 'relu_1_data'),
('placeholder_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
('conv_2_data', 'relu_2'),
('relu_2', 'relu_2_data'),
('relu_1_data', 'eltwise_1'),
('relu_2_data', 'eltwise_1'),
('eltwise_1', 'eltwise_1_data'),
('eltwise_1_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'relu_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
'relu_2_data': {'shape': np.array([1, 3, 112, 112])},
'eltwise_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_3': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_3_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'relu_1'),
('relu_1', 'relu_1_data'),
('placeholder_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
('conv_2_data', 'relu_2'),
('relu_2', 'relu_2_data'),
('relu_1_data', 'eltwise_1'),
('relu_2_data', 'eltwise_1'),
('eltwise_1', 'eltwise_1_data'),
('eltwise_1_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 4, 4]),
'output': np.array([3])},
'conv_1_data': {'shape': np.array([1, 3, 56, 56])},
'relu_1_data': {'shape': np.array([1, 3, 56, 56])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
                                 'conv_2': {'kernel_spatial': np.array([3, 3]),
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
BATCH_SIZE = 8
LR = 0.01
EPSILON = 0.95
GAMMA = 0.99
TARGET_REPLACE_ITER = 100 # After how much time you refresh target network
# MEMORY_CAPACITY = 20 # The size of experience replay buffer
# N_ACTIONS = 2
# # N_STATES = len(X_train)
# N_STATES = 4
class Net(nn.Module):
def __init__(self, N_STATES, N_ACTIONS):
super(Net, self).__init__()
self.fc1 = nn.Linear(N_STATES, 100)
self.fc1.weight.data.normal_(0, 0.1) # initialization, set seed to ensure the same result
self.out = nn.Linear(100, N_ACTIONS)
self.out.weight.data.normal_(0, 0.1) # initialization
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
action_value = self.out(x)
return action_value
class DQN2(object):
def __init__(self, N_STATES, N_ACTIONS,MEMORY_CAPACITY):
self.eval_net, self.target_net = Net(N_STATES, N_ACTIONS), Net(N_STATES, N_ACTIONS)
self.learn_step_counter = 0
self.memory_counter = 0
self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2))
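        # each replay-buffer row holds [state (N_STATES), action, reward, next_state (N_STATES)]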
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
self.loss_func = nn.MSELoss()
self.N_ACTIONS = N_ACTIONS
self.N_STATES = N_STATES
self.MEMORY_CAPACITY = MEMORY_CAPACITY
def choose_action(self, x):
x = torch.unsqueeze(torch.FloatTensor(x), 0)
        if np.random.uniform() < EPSILON:  # epsilon-greedy gate (threshold assumed to be EPSILON defined above)
COLOR_PALETE_PAR = (10, 150, 400)
MAXDISTANCE = 40
CHARSIZE_PLOT = .5
FONTSIZE = 18
MAX_LEN = 16
import matplotlib.pyplot as plt
import math
import numpy as np
import seaborn as sns
CMAP = sns.diverging_palette(COLOR_PALETE_PAR[0], COLOR_PALETE_PAR[1], n=COLOR_PALETE_PAR[2])
def choseChangeThis(c):
    return np.argmax(c)
"""
.. module:: optimization_problems
:synopsis: Optimization test problems for multi-modal and
box-constrained global optimization
.. moduleauthor:: <NAME> <<EMAIL>>,
<NAME> <<EMAIL>>
:Module: optimization_problems
:Author: <NAME> <<EMAIL>>,
<NAME> <<EMAIL>>
"""
import numpy as np
import abc
from abc import abstractmethod
class OptimizationProblem(object):
"""Base class for optimization problems."""
__metaclass__ = abc.ABCMeta
def __init__(self):
self.dim = None
self.lb = None
self.ub = None
self.int_var = None
self.cont_var = None
def __check_input__(self, x):
if len(x) != self.dim:
raise ValueError('Dimension mismatch')
@abstractmethod
def eval(self, record): # pragma: no cover
pass
# ========================= 2-dimensional =======================
class GoldsteinPrice(OptimizationProblem):
def __init__(self):
self.info = "2-dimensional Goldstein-Price function"
self.min = 3.0
self.minimum = np.array([0, -1])
self.dim = 2
self.lb = -2.0 * np.ones(2)
self.ub = 2.0 * np.ones(2)
self.int_var = np.array([])
self.cont_var = np.arange(0, 2)
def eval(self, x):
"""Evaluate the GoldStein Price function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
x1 = x[0]
x2 = x[1]
fact1a = (x1 + x2 + 1) ** 2
fact1b = 19 - 14 * x1 + 3 * x1 ** 2 - \
14 * x2 + 6 * x1 * x2 + 3 * x2 ** 2
fact1 = 1 + fact1a * fact1b
fact2a = (2 * x1 - 3 * x2) ** 2
fact2b = 18 - 32 * x1 + 12 * x1 ** 2 + 48 * x2 - \
36 * x1 * x2 + 27 * x2 ** 2
fact2 = 30 + fact2a * fact2b
return fact1 * fact2
class SixHumpCamel(OptimizationProblem):
"""Six-hump camel function
Details: https://www.sfu.ca/~ssurjano/camel6.html
Global optimum: :math:`f(0.0898,-0.7126)=-1.0316`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.min = -1.0316
self.minimum = np.array([0.0898, -0.7126])
self.dim = 2
self.lb = -3.0 * np.ones(2)
self.ub = 3.0 * np.ones(2)
self.int_var = np.array([])
self.cont_var = np.arange(0, 2)
self.info = "2-dimensional Six-hump function \nGlobal optimum: " +\
"f(0.0898, -0.7126) = -1.0316"
def eval(self, x):
"""Evaluate the Six Hump Camel function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
return (4.0 - 2.1*x[0]**2 + (x[0]**4)/3.0)*x[0]**2 + \
x[0]*x[1] + (-4 + 4*x[1]**2) * x[1]**2
class Branin(OptimizationProblem):
"""Branin function
Details: http://www.sfu.ca/~ssurjano/branin.html
Global optimum: :math:`f(-\\pi,12.275)=0.397887`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.min = 0.397887
self.minimum = np.array([-np.pi, 12.275])
self.dim = 2
self.lb = -3.0 * np.ones(2)
self.ub = 3.0 * np.ones(2)
self.int_var = np.array([])
self.cont_var = np.arange(0, 2)
self.info = "2-dimensional Branin function \nGlobal optimum: " +\
"f(-pi, 12.275) = 0.397887"
def eval(self, x):
"""Evaluate the Branin function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
x1 = x[0]
x2 = x[1]
t = 1 / (8 * np.pi)
s = 10
r = 6
c = 5 / np.pi
b = 5.1 / (4 * np.pi ** 2)
a = 1
term1 = a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2
term2 = s * (1 - t) * np.cos(x1)
return term1 + term2 + s
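# Illustrative check (not in the original file): Branin().eval(np.array([-np.pi, 12.275]))
# evaluates to ~0.397887, the documented global minimum.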
# ========================= 3-dimensional =======================
class Hartman3(OptimizationProblem):
"""Hartman 3 function
Details: http://www.sfu.ca/~ssurjano/hart3.html
Global optimum: :math:`f(0.114614,0.555649,0.852547)=-3.86278`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.dim = 3
self.lb = np.zeros(3)
self.ub = np.ones(3)
self.int_var = np.array([])
self.cont_var = np.arange(0, 3)
self.min = -3.86278
self.minimum = np.array([0.114614, 0.555649, 0.852547])
self.info = "3-dimensional Hartman function \nGlobal optimum: " +\
"f(0.114614,0.555649,0.852547) = -3.86278"
def eval(self, x):
"""Evaluate the Hartman 3 function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
alpha = np.array([1, 1.2, 3, 3.2])
A = np.array([[3.0, 10.0, 30.0], [0.1, 10.0, 35.0],
[3.0, 10.0, 30.0], [0.1, 10.0, 35.0]])
P = np.array([[0.3689, 0.1170, 0.2673],
[0.4699, 0.4387, 0.747],
[0.1091, 0.8732, 0.5547],
[0.0381, 0.5743, 0.8828]])
outer = 0
for ii in range(4):
inner = 0
for jj in range(3):
xj = x[jj]
Aij = A[ii, jj]
Pij = P[ii, jj]
inner += Aij * ((xj-Pij) ** 2)
outer += alpha[ii] * np.exp(-inner)
return -outer
# =========================6-dimensional =======================
class Hartman6(OptimizationProblem):
"""Hartman 6 function
Details: http://www.sfu.ca/~ssurjano/hart6.html
Global optimum: :math:`f(0.201,0.150,0.476,0.275,0.311,0.657)=-3.322`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.min = -3.32237
self.minimum = np.array([0.20169, 0.150011, 0.476874,
0.275332, 0.311652, 0.6573])
self.dim = 6
self.lb = np.zeros(6)
self.ub = np.ones(6)
self.int_var = np.array([])
self.cont_var = np.arange(0, 6)
self.info = "6-dimensional Hartman function \nGlobal optimum: " + \
"f(0.2016,0.15001,0.47687,0.27533,0.31165,0.657) = -3.3223"
def eval(self, x):
"""Evaluate the Hartman 6 function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
alpha = np.array([1.0, 1.2, 3.0, 3.2])
A = np.array([[10.0, 3.0, 17.0, 3.5, 1.7, 8.0],
[0.05, 10.0, 17.0, 0.1, 8.0, 14.0],
[3.0, 3.5, 1.7, 10.0, 17.0, 8.0],
[17.0, 8.0, 0.05, 10.0, 0.1, 14.0]])
P = 1e-4 * np.array([[1312.0, 1696.0, 5569.0, 124.0, 8283.0, 5886.0],
[2329.0, 4135.0, 8307.0, 3736.0, 1004.0, 9991.0],
[2348.0, 1451.0, 3522.0, 2883.0, 3047.0, 6650.0],
[4047.0, 8828.0, 8732.0, 5743.0, 1091.0, 381.0]])
outer = 0
for ii in range(4):
inner = 0
for jj in range(6):
xj = x[jj]
Aij = A[ii, jj]
Pij = P[ii, jj]
inner += Aij * ((xj - Pij) ** 2)
outer += alpha[ii] * np.exp(-inner)
return -outer
# ========================= n-dimensional =======================
class Rastrigin(OptimizationProblem):
"""Rastrigin function
.. math::
f(x_1,\\ldots,x_n)=10n-\\sum_{i=1}^n (x_i^2 - 10 \\cos(2 \\pi x_i))
subject to
.. math::
-5.12 \\leq x_i \\leq 5.12
Global optimum: :math:`f(0,0,...,0)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0
self.minimum = np.zeros(dim)
self.lb = -5.12 * np.ones(dim)
self.ub = 5.12 * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Rastrigin function \n" + \
"Global optimum: f(0,0,...,0) = 0"
def eval(self, x):
"""Evaluate the Rastrigin function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
return 10 * self.dim + sum(x**2 - 10 * np.cos(2 * np.pi * x))
class Ackley(OptimizationProblem):
"""Ackley function
.. math::
f(x_1,\\ldots,x_n) = -20\\exp\\left( -0.2 \\sqrt{\\frac{1}{n} \
\\sum_{j=1}^n x_j^2} \\right) -\\exp \\left( \\frac{1}{n} \
\\sum{j=1}^n \\cos(2 \\pi x_j) \\right) + 20 - e
subject to
.. math::
-15 \\leq x_i \\leq 20
Global optimum: :math:`f(0,0,...,0)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0
self.minimum = np.zeros(dim)
self.lb = -15 * np.ones(dim)
self.ub = 20 * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Ackley function \n" +\
"Global optimum: f(0,0,...,0) = 0"
def eval(self, x):
"""Evaluate the Ackley function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
d = float(self.dim)
return -20.0 * np.exp(-0.2*np.sqrt(np.sum(x**2) / d)) - \
np.exp(np.sum(np.cos(2.0*np.pi*x)) / d) + 20 + np.exp(1)
class Michalewicz(OptimizationProblem):
"""Michalewicz function
.. math::
f(x_1,\\ldots,x_n) = -\\sum_{i=1}^n \\sin(x_i) \\sin^{20}
\\left( \\frac{ix_i^2}{\\pi} \\right)
subject to
.. math::
0 \\leq x_i \\leq \\pi
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.lb = np.zeros(dim)
self.ub = np.pi * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Michalewicz function \n" + \
"Global optimum: ??"
def eval(self, x):
"""Evaluate the Michalewicz function at x.
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
return -np.sum(np.sin(x) * (
np.sin(((1 + np.arange(self.dim)) * x**2)/np.pi)) ** 20)
class Levy(OptimizationProblem):
"""Levy function
Details: https://www.sfu.ca/~ssurjano/levy.html
Global optimum: :math:`f(1,1,...,1)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0.0
self.minimum = np.ones(dim)
self.lb = -5 * np.ones(dim)
self.ub = 5 * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Levy function \n" +\
"Global optimum: f(1,1,...,1) = 0"
def eval(self, x):
"""Evaluate the Levy function at x.
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
w = 1 + (x - 1.0) / 4.0
d = self.dim
return np.sin(np.pi*w[0]) ** 2 + \
np.sum((w[1:d-1]-1)**2 * (1 + 10*np.sin(np.pi*w[1:d-1]+1)**2)) + \
(w[d-1] - 1)**2 * (1 + np.sin(2*np.pi*w[d-1])**2)
class Griewank(OptimizationProblem):
"""Griewank function
.. math::
f(x_1,\\ldots,x_n) = 1 + \\frac{1}{4000} \\sum_{j=1}^n x_j^2 - \
\\prod_{j=1}^n \\cos \\left( \\frac{x_i}{\\sqrt{i}} \\right)
subject to
.. math::
-512 \\leq x_i \\leq 512
Global optimum: :math:`f(0,0,...,0)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0
self.minimum = np.zeros(dim)
self.lb = -512 * np.ones(dim)
self.ub = 512 * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Griewank function \n" +\
"Global optimum: f(0,0,...,0) = 0"
def eval(self, x):
"""Evaluate the Griewank function at x.
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
total = 1
for i, y in enumerate(x):
total *= np.cos(y / np.sqrt(i + 1))
return 1.0 / 4000.0 * sum([y**2 for y in x]) - total + 1
class Rosenbrock(OptimizationProblem):
"""Rosenbrock function
.. math::
f(x_1,\\ldots,x_n) = \\sum_{j=1}^{n-1} \
\\left( 100(x_j^2-x_{j+1})^2 + (1-x_j)^2 \\right)
subject to
.. math::
-2.048 \\leq x_i \\leq 2.048
Global optimum: :math:`f(1,1,...,1)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0
self.minimum = np.ones(dim)
self.lb = -2.048 * np.ones(dim)
self.ub = 2.048 * np.ones(dim)
self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
# -*- coding: utf-8 -*-
from __future__ import print_function
import math
import numpy
import numpy.linalg
import MDAnalysis
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class KabschAlign(object):
def __init__(self):
"""
Constructor
"""
def kabsch(self, toXYZ, fromXYZ):
"""
Input is a 3 x N array of coordinates.
"""
# This file has been edited to produce identical results as the original matlab implementation.
len1 = numpy.shape(fromXYZ);
len2 = numpy.shape(toXYZ);
if not(len1[1] == len2[1]):
print('KABSCH: unequal array sizes');
return;
m1 = numpy.mean(fromXYZ, 1).reshape((len1[0],1)); # print numpy.shape(m1);
m2 = numpy.mean(toXYZ, 1).reshape((len2[0],1));
tmp1 = numpy.tile(m1,len1[1]);
tmp2 = numpy.tile(m1,len2[1]);
assert numpy.allclose(tmp1, tmp2);
assert tmp1.shape == fromXYZ.shape;
assert tmp2.shape == toXYZ.shape;
t1 = fromXYZ - tmp1;
t2 = toXYZ - tmp2;
[u, s, wh] = numpy.linalg.svd(numpy.dot(t2,t1.T));
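        # Optimal rotation from the SVD of the covariance of the centered point sets;
        # the det(...) term below guards against an improper rotation (reflection).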
w = wh.T;
R = numpy.dot(numpy.dot(u,[[1, 0, 0],[0, 1, 0],[0, 0, numpy.linalg.det(numpy.dot(u,w.T))]]), w.T);
T = m2 - numpy.dot(R,m1);
tmp3 = numpy.reshape(numpy.tile(T,(len2[1])),(len1[0],len1[1]));
err = toXYZ - numpy.dot(R,fromXYZ) - tmp3;
#eRMSD = math.sqrt(sum(sum((numpy.dot(err,err.T))))/len2[1]);
eRMSD = math.sqrt(sum(sum(err**2))/len2[1]);
return (R, T, eRMSD, err.T);
def wKabschDriver(self, toXYZ, fromXYZ, sMed=1.5, maxIter=20):
scaleMed = sMed;
weights = numpy.ones( numpy.shape(toXYZ)[1] ); #print 'weights: ', numpy.shape(weights);
flagOut = 0;
Rc = []; Tc = []; sigc = [];
for itr in range(0, maxIter):
[R, T, eRMSD, err] = self.wKabsch(toXYZ, fromXYZ, weights);
Rc.append(R);
Tc.append(T);
tmp1 = numpy.reshape(numpy.tile(T, (numpy.shape(toXYZ[1]))), (numpy.shape(toXYZ)[0],numpy.shape(toXYZ)[1]));
deltaR = numpy.array( numpy.dot(R, fromXYZ) + tmp1 - toXYZ ); #print 'deltaR shape: ', numpy.shape(deltaR);
#print deltaR;
#numpy.save('deltaR.npy', deltaR);
nDeltaR = numpy.sqrt(numpy.sum(deltaR**2, axis = 0)); #print 'nDeltaR shape:', numpy.shape(nDeltaR);
sig = scaleMed*numpy.median(nDeltaR);
sigc.append(sig);
weights = (sig**2)/((sig**2 + nDeltaR**2)**2); #print numpy.shape(weights);
return ( R, T, eRMSD, err);
def wKabsch(self, toXYZ, fromXYZ, weights):
len1 = numpy.shape(fromXYZ); #print 'len1: ', len1;
        len2 = numpy.shape(toXYZ);
#! /usr/bin/env python
from test.fail import As_fail, Y_fail
from scipy.stats import ortho_group
import unittest
import numpy as np
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from functools import reduce
from src.operations import invert
from src.kronprod import KronProd
class TestKronInv(unittest.TestCase):
# add global stuff here
def setUp(self):
return
#Make some tests if the matrix IS invertible
def testOnes_inv(self):
A1 = [ np.array([[1., 1.], [1.,1.]]),
np.array([[1.,1.], [1.,1.]])]
x1 = np.array([1.,1.,1.,1.])
y1 = np.array([4,4,4,4])
kp = KronProd(invert(A1))
x = kp.dot(y1)
np.testing.assert_almost_equal(x, x1, decimal=7, verbose=True)
# this dimensionality pushes the limit of what full rank calc can do
def testRandom_inv(self):
n = 5 # number of factors
p = 5 # dimension of factor
r_As = [ortho_group.rvs(dim=p) for i in range(n)]
As = [m/m.sum(axis=1)[:,None] for m in r_As] # normalize each row
y = np.random.rand(p**n)
big_A = reduce(np.kron, As)
big_x = np.linalg.solve(big_A, y)
print("full calc: ",big_x)
kp = KronProd(invert(As))
x = kp.dot(y)
print("efficient calc: ", x)
np.testing.assert_almost_equal(x, big_x, decimal=7, verbose=True)
def testBig_inv(self):
n = 2 # number of factors
p = 100 # dimension of factor
r_As = [ortho_group.rvs(dim=p) for i in range(n)]
As = [m/m.sum(axis=1)[:,None] for m in r_As] # normalize each row
y = np.random.rand(p**n)
kp = KronProd(invert(As))
x = kp.dot(y)
print("efficient calc: ", x)
#Make some tests if the matrix ISNT invertible
def testInts_pInv(self):
A1 = [ np.array([[1.0, 0.0], [0.0,0.0]]),
np.array([[1.,1.], [0.,0.]])]
y1 = np.array([1.,2.,3.,4.])
A1_inv = []
for a in A1:
A1_inv.append(np.linalg.pinv(a))
big_A = reduce(np.kron, A1_inv)
big_x = big_A.dot(y1)
print("FOO")
print("full calc: ",big_x)
kp = KronProd(invert(A1))
x = kp.dot(y1)
print("efficient calc: ", x)
print("BAR")
self.assertSequenceEqual(list(x), list(big_x))
# this dimensionality pushes the limit of what full rank calc can do
def testRandom_pInv(self):
n = 5 # number of factors
p = 5 # dimension of factor
r_As = [ortho_group.rvs(dim=p) for i in range(n)]
#Make first and second row the same so that way it becomes a non-invertible matrix
for A in r_As:
A[1,:] = A[0,:]
As = [m/m.sum(axis=1)[:,None] for m in r_As] # normalize each row
        y = np.random.rand(p**n)
from direct.showbase.ShowBase import ShowBase
from panda3d.core import WindowProperties, Texture, TextureStage, DirectionalLight, VBase4, VBase2, AmbientLight, LineSegs, GeomPoints, NodePath, CompassEffect, MouseButton, SamplerState
from direct.gui.OnscreenText import OnscreenText
from PIL import Image
import threading
import numpy as np
from matplotlib import cm
import shapefile
class EarthPlot(ShowBase):
def __init__(self,show_poles = False, highres_earth = False, d_light = True, d_light_HPR = (90,0,23.5), d_light_strength = (1,1,1,1), *args,**kwargs):
ShowBase.__init__(self,*args,**kwargs)
self.canvas_width = 900
self.canvas_height = 900
self.sphere_size = 100
self.time_elapsed = 0 #seconds
self.earth_radius = 6378.137 #km
self.plot_radius = self.earth_radius+5
self.time_step = 10 #seconds
self.groundstations = [] #list for current groundstation objects
self.drag_id = ''
self.x_mouse_position = 0
self.y_mouse_position = 0
self.props = WindowProperties()
# self.props.setOrigin(100, 100)
self.props.setSize(int(self.canvas_width), int(self.canvas_height))
# self.openDefaultWindow(props=self.props)
self.setBackgroundColor(0,0,0)
#load sphere model
self.earth_nodepath= loader.loadModel('./resources/sphere.egg')
self.earth_nodepath.reparentTo(self.render)
self.earth_nodepath.setScale(self.earth_radius/self.sphere_size)
#enable shaders (gloss, ..)
self.earth_nodepath.setShaderAuto()
#initiate textures
self.RGB_tex = Texture()
self.gloss_tex = Texture()
self.emis_tex = Texture()
#loading images
if highres_earth:
img = Image.open("./resources/8081_earthmap10k.jpg")
gloss_img = Image.open("./resources/8081_earthspec10k.jpg")
else:
img = Image.open("./resources/8081_earthmap4k.jpg")
gloss_img = Image.open("./resources/8081_earthspec4k.jpg")
img = np.flipud(np.array(img))
gloss_img = np.flipud(np.array(gloss_img)) *0.7
self.resolution = img.shape
#setting RGB texture
self.RGB_tex.setup2dTexture(self.resolution[1],self.resolution[0], Texture.T_unsigned_byte, Texture.F_rgb8)
self.RGB_tex.setMagfilter(SamplerState.FT_linear)
RGB_buff = img.astype(np.uint8).tobytes()
self.RGB_tex.setRamImageAs(RGB_buff, 'RGB')
self.earth_nodepath.setTexture(self.RGB_tex)
#setting gloss/specularity texture
gloss_ts = TextureStage('glossmap')
gloss_ts.setMode(TextureStage.MGloss)
self.gloss_tex.setup2dTexture(self.resolution[1],self.resolution[0], Texture.T_unsigned_byte, Texture.F_alpha)
gloss_buff = gloss_img.astype(np.uint8).tobytes()
self.gloss_tex.setRamImage(gloss_buff)
self.earth_nodepath.setTexture(gloss_ts,self.gloss_tex)
#lights
#directional light
dlight = DirectionalLight('dlight')
dlight.setColor(VBase4(d_light_strength[0], d_light_strength[1], d_light_strength[2], d_light_strength[3]))
self.dlnp = render.attachNewNode(dlight)
self.dlnp.setHpr(d_light_HPR[0], d_light_HPR[1], d_light_HPR[2])
if d_light:
render.setLight(self.dlnp)
#amblight earth
amblight_earth = AmbientLight('amblight_earth')
if d_light:
amblight_earth.setColor(VBase4(.5, .5, .5, 1))
else:
amblight_earth.setColor(VBase4(.9, .9, .9, 1))
self.alnp = self.earth_nodepath.attachNewNode(amblight_earth)
self.earth_nodepath.setLight(self.alnp)
#amblight lines
self.amblight_lines = AmbientLight('amblight_lines')
self.amblight_lines.setColor(VBase4(1, 1, 1, 1))
if show_poles:
earth_poles = LineSegs()
earth_poles.setThickness(4)
earth_poles.setColor(1,0,0)
earth_poles.moveTo(0,0,0)
earth_poles.drawTo(0,0,-6600)
earth_poles.moveTo(0,0,0)
earth_poles.setColor(0,0,1)
earth_poles.drawTo(0,0,6600)
node = earth_poles.create()
line_np = NodePath(node)
line_np.reparentTo(self.render)
alnp = line_np.attachNewNode(self.amblight_lines)
line_np.setLight(alnp)
#camera settings
self.disableMouse()
self.parentnode = render.attachNewNode('camparent')
self.parentnode.reparentTo(self.earth_nodepath) # inherit transforms
self.parentnode.setEffect(CompassEffect.make(render)) # NOT inherit rotation
self.camera.reparentTo(self.parentnode)
self.camera.setY(-self.earth_radius/15) # camera distance from model
self.camera.lookAt(self.parentnode)
self.heading = 0
self.pitch = 0
self.taskMgr.add(self.OrbitCameraTask, 'thirdPersonCameraTask')
self.accept('wheel_up', lambda : self.set_camera_fov(self.camLens.getFov(),+0.5))
self.accept('wheel_down', lambda : self.set_camera_fov(self.camLens.getFov(),-0.5))
def set_camera_fov(self,fov, value):
new_fov = fov[0] + value
H = new_fov*np.pi/180
x_size = self.win.getXSize()
y_size = self.win.getYSize()
inv_aspect = y_size/x_size
V = 2*np.arctan(np.tan(H/2)* inv_aspect)*180/np.pi
fov_lr = new_fov
fov_ud = V
if (fov_lr > 1) and (fov_ud > 1):
if (fov_lr <= 120) and (fov_ud <= 120):
self.camLens.setFov(VBase2(fov_lr, fov_ud))
return
def show(self):
self.run()
def plot_lines(self,xs,ys,zs, color = [0,0,0], linewidth = 2, shading = 'None'):
'''
takes 3 coordinates in a list and plots them as continuing line
'''
line = [(x,y,z) for x,y,z in zip(xs,ys,zs)]
lines = LineSegs()
lines.setColor(color[0], color[1], color[2])
for point in line:
lines.drawTo(point[0],point[1],point[2])
lines.setThickness(linewidth)
node = lines.create()
line_np = NodePath(node)
line_np.reparentTo(self.render)
if shading == 'None':
alnp = line_np.attachNewNode(self.amblight_lines)
line_np.setLight(alnp)
elif shading == 'ambient':
line_np.setLight(self.alnp)
elif shading == 'directional':
line_np.setLight(self.dlnp)
else:
raise ValueError('shading must be either "None", "ambient" or "directional".')
return line_np
def plot_markers(self,xs,ys,zs, color = [0,0,0], markersize = 1):
'''
takes list of coordinates and plots a spherical marker on each one
'''
np_list = []
points = [(x,y,z) for x,y,z in zip(xs,ys,zs)]
for point in points:
            marker_np = loader.loadModel('./resources/sphere.egg')  # renamed to avoid shadowing the numpy alias 'np'
            marker_np.reparentTo(self.render)
            alnp = marker_np.attachNewNode(self.amblight_lines)
            marker_np.setLight(alnp)
            marker_np.setColor(color[0], color[1], color[2])
            marker_np.setPos(point[0], point[1], point[2])
            marker_np.setScale(markersize)
            np_list.append(marker_np)
return np_list
def calc_orbit(self,a,e,Omega,i,omega, resolution = 100):
'''calculates orbit 3x1 radius vectors from kepler elements'''
nu = np.linspace(0,2*np.pi,resolution)
if e <1:
p = a * (1-(e**2))
r = p/(1+e*np.cos(nu))
r = np.array([np.multiply(r,np.cos(nu)) , np.multiply(r,np.sin(nu)), np.zeros(len(nu))])
r = np.matmul(self.rot_z(omega),r)
r = np.matmul(self.rot_x(i),r)
r = np.matmul(self.rot_z(Omega),r)
elif e >=1:
raise ValueError('eccentricity must be smaller than 1, hyperbolic and parabolic orbits are not supported')
return r
@staticmethod
def rot_x(phi):
'''returns rotational matrix around x, phi in rad'''
return np.array([[1,0,0],[0,np.cos(phi),-np.sin(phi)],[0,np.sin(phi),np.cos(phi)]])
@staticmethod
def rot_z(rho):
'''returns rotational matrix around z, rho in rad'''
return np.array([[np.cos(rho),-np.sin(rho),0],[np.sin(rho),np.cos(rho),0],[0,0,1]])
def plot_orbit(self,a,e,Omega,i,omega, resolution = 100, **args):
r = self.calc_orbit(a,e,Omega,i,omega, resolution)
xs = r[0]
ys = r[1]
zs = r[2]
        line_np = self.plot_lines(xs,ys,zs, **args)  # renamed to avoid shadowing the numpy alias 'np'
        return line_np
def plot_surface_markers(self, lons, lats, **args):
xs, ys, zs = [], [], []
np_list = []
for lon,lat in zip(lons,lats):
x, y, z = self.calculate_cartesian(lon,lat)
xs.append(x), ys.append(y), zs.append(z)
np_list = self.plot_markers(xs,ys,zs, **args)
return np_list
def plot_surface_lines(self, lons, lats, **args):
xs, ys, zs = [], [], []
np_list = []
for lon,lat in zip(lons,lats):
x, y, z = self.calculate_cartesian(lon,lat)
xs.append(x), ys.append(y), zs.append(z)
np_list = self.plot_lines(xs,ys,zs, **args)
return np_list
def calculate_cartesian(self, lon, lat):
lon, lat = np.pi*lon/180, np.pi*lat/180
x = self.plot_radius * np.cos(lat) * np.cos(lon)
y = self.plot_radius * np.cos(lat) * np.sin(lon)
z = self.plot_radius * np.sin(lat)
return x,y,z
def plot_greatcircle(self, lon1,lat1, lon2,lat2, resolution = 500, **args):
lons,lats = self.greatcircle_fun(lon1,lat1, lon2,lat2, resolution = resolution)
nodepath = self.plot_surface_lines(lons,lats, **args)
return nodepath
def greatcircle_fun(self, lon1,lat1, lon2,lat2, resolution = 500):
lons = np.linspace(0,2*np.pi,resolution)
lat1, lon1 = np.pi*lat1/180, np.pi*lon1/180
lat2, lon2 = np.pi*lat2/180, np.pi*lon2/180
lats = np.arctan( np.tan(lat1)*( np.sin(lons - lon2) / np.sin(lon1 - lon2) ) - np.tan(lat2)* ( np.sin(lons-lon1) / np.sin(lon1 - lon2)) )
lons = 180*lons/np.pi
lats = 180*lats/np.pi
return lons, lats
def plot_geodetic(self, lon1,lat1, lon2,lat2, resolution = 50,**args):
lat1, lon1 = np.pi*lat1/180, np.pi*lon1/180
lat2, lon2 = np.pi*lat2/180, np.pi*lon2/180
        lamb_12 = lon2 - lon1  # longitude difference; the original subtracted lat1, which looks like a typo
alpha_1 = np.arctan2 ( (np.cos(lat2)* | np.sin(lamb_12) | numpy.sin |
from __future__ import division
import numpy as NP
import scipy.constants as FCNST
from scipy import interpolate, ndimage
import datetime as DT
import progressbar as PGB
import os, ast
import copy
import astropy
from astropy.io import fits, ascii
from astropy.coordinates import Galactic, SkyCoord, ICRS, FK5, AltAz, EarthLocation
from astropy import units
from astropy.time import Time
import warnings
import h5py
from distutils.version import LooseVersion
import psutil
from astroutils import geometry as GEOM
from astroutils import gridding_modules as GRD
from astroutils import constants as CNST
from astroutils import DSP_modules as DSP
from astroutils import catalog as SM
from astroutils import lookup_operations as LKP
from astroutils import nonmathops as NMO
import prisim
import baseline_delay_horizon as DLY
import primary_beams as PB
try:
import pyuvdata
from pyuvdata import UVData
from pyuvdata import utils as UVUtils
except ImportError:
uvdata_module_found = False
else:
uvdata_module_found = True
try:
from mwapy.pb import primary_beam as MWAPB
except ImportError:
mwa_tools_found = False
else:
mwa_tools_found = True
prisim_path = prisim.__path__[0]+'/'
################################################################################
def _astropy_columns(cols, tabtype='BinTableHDU'):
"""
----------------------------------------------------------------------------
!!! FOR INTERNAL USE ONLY !!!
This internal routine checks for Astropy version and produces the FITS
columns based on the version
Inputs:
cols [list of Astropy FITS columns] These are a list of Astropy FITS
columns
tabtype [string] specifies table type - 'BinTableHDU' (default) for binary
tables and 'TableHDU' for ASCII tables
Outputs:
columns [Astropy FITS column data]
----------------------------------------------------------------------------
"""
try:
cols
except NameError:
raise NameError('Input cols not specified')
if tabtype not in ['BinTableHDU', 'TableHDU']:
raise ValueError('tabtype specified is invalid.')
use_ascii = False
if tabtype == 'TableHDU':
use_ascii = True
if astropy.__version__ == '0.4':
columns = fits.ColDefs(cols, tbtype=tabtype)
elif LooseVersion(astropy.__version__)>=LooseVersion('0.4.2'):
columns = fits.ColDefs(cols, ascii=use_ascii)
return columns
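# Editor's note: a minimal usage sketch (not part of the original module)
# showing how this version-aware helper would typically be called. The column
# names and contents below are placeholders chosen purely for illustration.
def _example_astropy_columns():
    cols = [fits.Column(name='frequency', format='D',
                        array=NP.linspace(1e8, 2e8, 16)),
            fits.Column(name='flag', format='L',
                        array=NP.zeros(16, dtype=bool))]
    return _astropy_columns(cols, tabtype='BinTableHDU')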
################################################################################
def thermalNoiseRMS(A_eff, df, dt, Tsys, nbl=1, nchan=1, ntimes=1,
flux_unit='Jy', eff_Q=1.0):
"""
-------------------------------------------------------------------------
Generates thermal noise RMS from instrument parameters for a complex-
valued visibility measurement by an interferometer.
[Based on equations 9-12 through 9-15 or section 5 in chapter 9 on
Sensitivity in SIRA II wherein the equations are for real and imaginary
parts separately.]
A_eff [scalar or numpy array] Effective area of the interferometer.
Has to be in units of m^2. If only a scalar value
provided, it will be assumed to be identical for all the
interferometers. Otherwise, it must be of shape broadcastable
               to (nbl,nchan,ntimes). So accepted shapes can be (1,1,1),
(1,1,ntimes), (1,nchan,1), (nbl,1,1), (1,nchan,ntimes),
(nbl,nchan,1), (nbl,1,ntimes), or (nbl,nchan,ntimes). Must
be specified. No defaults.
df [scalar] Frequency resolution (in Hz). Must be specified. No
defaults.
dt [scalar] Time resolution (in seconds). Must be specified. No
defaults.
Tsys [scalar or numpy array] System temperature (in K).
If only a scalar value provided, it will be assumed to be
identical for all the interferometers. Otherwise, it must be of
               shape broadcastable to (nbl,nchan,ntimes). So accepted shapes
can be (1,1,1), (1,1,ntimes), (1,nchan,1), (nbl,1,1),
(1,nchan,ntimes), (nbl,nchan,1), (nbl,1,ntimes), or
(nbl,nchan,ntimes). Must be specified. No defaults.
nbl [integer] Number of baseline vectors. Default=1
nchan [integer] Number of frequency channels. Default=1
ntimes [integer] Number of time stamps. Default=1
flux_unit [string] Units of thermal noise RMS to be returned. Accepted
values are 'K' or 'Jy' (default)
eff_Q [scalar or numpy array] Efficiency of the interferometer(s).
Has to be between 0 and 1. If only a scalar value
provided, it will be assumed to be identical for all the
interferometers. Otherwise, it must be of shape broadcastable
               to (nbl,nchan,ntimes). So accepted shapes can be (1,1,1),
(1,1,ntimes), (1,nchan,1), (nbl,1,1), (1,nchan,ntimes),
(nbl,nchan,1), (nbl,1,ntimes), or (nbl,nchan,ntimes).
Default=1.0
Output:
Numpy array of thermal noise RMS (in units of K or Jy depending on
flux_unit) of shape (nbl, nchan, ntimes) expected on a complex-valued
visibility measurement from an interferometer. 1/sqrt(2) of this goes
    into each of the real and imaginary parts.
[Based on equations 9-12 through 9-15 or section 5 in chapter 9 on
Sensitivity in SIRA II wherein the equations are for real and imaginary
parts separately.]
-------------------------------------------------------------------------
"""
try:
A_eff, df, dt, Tsys
except NameError:
raise NameError('Inputs A_eff, df, dt, and Tsys must be specified')
if not isinstance(df, (int,float)):
raise TypeError('Input channel resolution must be a scalar')
else:
df = float(df)
if not isinstance(dt, (int,float)):
raise TypeError('Input time resolution must be a scalar')
else:
dt = float(dt)
if not isinstance(nbl, int):
raise TypeError('Input nbl must be an integer')
else:
if nbl <= 0:
raise ValueError('Input nbl must be positive')
if not isinstance(nchan, int):
raise TypeError('Input nchan must be an integer')
else:
if nchan <= 0:
raise ValueError('Input nchan must be positive')
if not isinstance(ntimes, int):
raise TypeError('Input ntimes must be an integer')
else:
if ntimes <= 0:
raise ValueError('Input ntimes must be positive')
if not isinstance(Tsys, (int,float,list,NP.ndarray)):
raise TypeError('Input Tsys must be a scalar, float, list or numpy array')
if isinstance(Tsys, (int,float)):
Tsys = NP.asarray(Tsys, dtype=NP.float).reshape(1,1,1)
else:
Tsys = NP.asarray(Tsys, dtype=NP.float)
if NP.any(Tsys < 0.0):
raise ValueError('Value(s) in Tsys cannot be negative')
if (Tsys.shape != (1,1,1)) and (Tsys.shape != (1,nchan,1)) and (Tsys.shape != (1,1,ntimes)) and (Tsys.shape != (nbl,1,1)) and (Tsys.shape != (nbl,nchan,1)) and (Tsys.shape != (nbl,1,ntimes)) and (Tsys.shape != (1,nchan,ntimes)) and (Tsys.shape != (nbl,nchan,ntimes)):
raise IndexError('System temperature specified has incompatible dimensions')
if not isinstance(A_eff, (int,float,list,NP.ndarray)):
raise TypeError('Input A_eff must be a scalar, float, list or numpy array')
if isinstance(A_eff, (int,float)):
A_eff = NP.asarray(A_eff, dtype=NP.float).reshape(1,1,1)
else:
A_eff = NP.asarray(A_eff, dtype=NP.float)
if NP.any(A_eff < 0.0):
raise ValueError('Value(s) in A_eff cannot be negative')
if (A_eff.shape != (1,1,1)) and (A_eff.shape != (1,nchan,1)) and (A_eff.shape != (1,1,ntimes)) and (A_eff.shape != (nbl,1,1)) and (A_eff.shape != (nbl,nchan,1)) and (A_eff.shape != (nbl,1,ntimes)) and (A_eff.shape != (1,nchan,ntimes)) and (A_eff.shape != (nbl,nchan,ntimes)):
raise IndexError('Effective area specified has incompatible dimensions')
if not isinstance(eff_Q, (int,float,list,NP.ndarray)):
raise TypeError('Input eff_Q must be a scalar, float, list or numpy array')
if isinstance(eff_Q, (int,float)):
eff_Q = NP.asarray(eff_Q, dtype=NP.float).reshape(1,1,1)
else:
eff_Q = NP.asarray(eff_Q, dtype=NP.float)
if NP.any(eff_Q < 0.0):
raise ValueError('Value(s) in eff_Q cannot be negative')
if (eff_Q.shape != (1,1,1)) and (eff_Q.shape != (1,nchan,1)) and (eff_Q.shape != (1,1,ntimes)) and (eff_Q.shape != (nbl,1,1)) and (eff_Q.shape != (nbl,nchan,1)) and (eff_Q.shape != (nbl,1,ntimes)) and (eff_Q.shape != (1,nchan,ntimes)) and (eff_Q.shape != (nbl,nchan,ntimes)):
        raise IndexError('Efficiency eff_Q specified has incompatible dimensions')
if not isinstance(flux_unit, str):
raise TypeError('Input flux_unit must be a string')
else:
if flux_unit.lower() not in ['k', 'jy']:
raise ValueError('Input flux_unit must be set to K or Jy')
if flux_unit.lower() == 'k':
rms = Tsys/eff_Q/NP.sqrt(dt*df)
else:
rms = 2.0 * FCNST.k / NP.sqrt(dt*df) * (Tsys/A_eff/eff_Q) / CNST.Jy
return rms
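# Editor's note: a minimal usage sketch (not part of the original module).
# The instrument numbers below (14 m dish, ~60% aperture efficiency, 100 kHz
# channels, 10 s integrations, Tsys of 200 K) are placeholders, not defaults.
def _example_thermalNoiseRMS():
    A_eff = 0.6 * NP.pi * (14.0/2)**2  # effective area of a 14 m dish in m^2
    rms = thermalNoiseRMS(A_eff, 1e5, 10.0, 200.0, nbl=3, nchan=2, ntimes=4,
                          flux_unit='Jy', eff_Q=0.96)
    return rms  # broadcastable to shape (nbl, nchan, ntimes)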
################################################################################
def generateNoise(noiseRMS=None, A_eff=None, df=None, dt=None, Tsys=None, nbl=1,
nchan=1, ntimes=1, flux_unit='Jy', eff_Q=None):
"""
-------------------------------------------------------------------------
Generates thermal noise from instrument parameters for a complex-valued
visibility measurement from an interferometer.
[Based on equations 9-12 through 9-15 or section 5 in chapter 9 on
Sensitivity in SIRA II wherein the equations are for real and imaginary
parts separately.]
noiseRMS [NoneType or scalar or numpy array] If set to None (default),
the rest of the parameters are used in determining the RMS of
thermal noise. If specified as scalar, all other parameters
will be ignored in estimating noiseRMS and this value will be
used instead. If specified as a numpy array, it must be of
               shape broadcastable to (nbl,nchan,ntimes). So accepted shapes
can be (1,1,1), (1,1,ntimes), (1,nchan,1), (nbl,1,1),
(1,nchan,ntimes), (nbl,nchan,1), (nbl,1,ntimes), or
               (nbl,nchan,ntimes). It is assumed to be an RMS comprising
both real and imaginary parts. Therefore, 1/sqrt(2) of this
goes into each of the real and imaginary parts.
A_eff [scalar or numpy array] Effective area of the interferometer.
Has to be in units of m^2. If only a scalar value
provided, it will be assumed to be identical for all the
interferometers. Otherwise, it must be of shape broadcastable
               to (nbl,nchan,ntimes). So accepted shapes can be (1,1,1),
(1,1,ntimes), (1,nchan,1), (nbl,1,1), (1,nchan,ntimes),
(nbl,nchan,1), (nbl,1,ntimes), or (nbl,nchan,ntimes). Will
apply only if noiseRMS is set to None
df [scalar] Frequency resolution (in Hz). Will apply only if
noiseRMS is set to None
dt [scalar] Time resolution (in seconds). Will apply only if
noiseRMS is set to None
Tsys [scalar or numpy array] System temperature (in K).
If only a scalar value provided, it will be assumed to be
identical for all the interferometers. Otherwise, it must be of
               shape broadcastable to (nbl,nchan,ntimes). So accepted shapes
can be (1,1,1), (1,1,ntimes), (1,nchan,1), (nbl,1,1),
(1,nchan,ntimes), (nbl,nchan,1), (nbl,1,ntimes), or
(nbl,nchan,ntimes). Will apply only if noiseRMS is set to None
nbl [integer] Number of baseline vectors. Default=1
nchan [integer] Number of frequency channels. Default=1
ntimes [integer] Number of time stamps. Default=1
flux_unit [string] Units of thermal noise RMS to be returned. Accepted
values are 'K' or 'Jy' (default). Will only apply if noiseRMS
is set to None. Otherwise the flux_unit will be ignored and
the returned value will be in same units as noiseRMS
eff_Q [scalar or numpy array] Efficiency of the interferometer(s).
Has to be between 0 and 1. If only a scalar value
provided, it will be assumed to be identical for all the
interferometers. Otherwise, it must be of shape broadcastable
               to (nbl,nchan,ntimes). So accepted shapes can be (1,1,1),
(1,1,ntimes), (1,nchan,1), (nbl,1,1), (1,nchan,ntimes),
(nbl,nchan,1), (nbl,1,ntimes), or (nbl,nchan,ntimes).
Default=1.0. Will apply only if noiseRMS is set to None
Output:
Numpy array of thermal noise (units of noiseRMS if specified or in units
of K or Jy depending on flux_unit) of shape (nbl, nchan, ntimes) for a
complex-valued visibility measurement from an interferometer.
[Based on equations 9-12 through 9-15 or section 5 in chapter 9 on
Sensitivity in SIRA II wherein the equations are for real and imaginary
parts separately.]
-------------------------------------------------------------------------
"""
if noiseRMS is None:
noiseRMS = thermalNoiseRMS(A_eff, df, dt, Tsys, nbl=nbl, nchan=nchan, ntimes=ntimes, flux_unit=flux_unit, eff_Q=eff_Q)
else:
if not isinstance(noiseRMS, (int,float,list,NP.ndarray)):
raise TypeError('Input noiseRMS must be a scalar, float, list or numpy array')
if isinstance(noiseRMS, (int,float)):
noiseRMS = NP.asarray(noiseRMS, dtype=NP.float).reshape(1,1,1)
else:
noiseRMS = NP.asarray(noiseRMS, dtype=NP.float)
if NP.any(noiseRMS < 0.0):
raise ValueError('Value(s) in noiseRMS cannot be negative')
if (noiseRMS.shape != (1,1,1)) and (noiseRMS.shape != (1,nchan,1)) and (noiseRMS.shape != (1,1,ntimes)) and (noiseRMS.shape != (nbl,1,1)) and (noiseRMS.shape != (nbl,nchan,1)) and (noiseRMS.shape != (nbl,1,ntimes)) and (noiseRMS.shape != (1,nchan,ntimes)) and (noiseRMS.shape != (nbl,nchan,ntimes)):
raise IndexError('Noise RMS specified has incompatible dimensions')
return noiseRMS / NP.sqrt(2.0) * (NP.random.randn(nbl,nchan,ntimes) + 1j * NP.random.randn(nbl,nchan,ntimes)) # sqrt(2.0) is to split equal uncertainty into real and imaginary parts
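# Editor's note: a minimal usage sketch (not part of the original module).
# It draws one complex noise realization from a pre-computed RMS rather than
# from instrument parameters; the 5 Jy RMS is a placeholder.
def _example_generateNoise():
    nbl, nchan, ntimes = 3, 2, 4
    noise = generateNoise(noiseRMS=5.0, nbl=nbl, nchan=nchan, ntimes=ntimes)
    return noise  # complex array of shape (nbl, nchan, ntimes)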
################################################################################
def read_gaintable(gainsfile, axes_order=None):
"""
---------------------------------------------------------------------------
Read gain table from file and return
Input:
gainsfile [string] Filename including the full path that contains the
instrument gains. It must be in HDF5 format. It must contain
the following structure:
'antenna-based' [dictionary] Contains antenna-based
instrument gain information. It has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nax1, nax2, nax3)
where ax1, ax2 and ax3 are
specified by the axes
ordering under key 'ordering'.
If there is no variations in
gains along an axis, then the
corresponding nax may be set
to 1 and the gains will be
replicated along that axis
using numpy array broadcasting.
For example, shapes (nax1,1,1),
(1,1,1), (1,nax2,nax3) are
acceptable. If specified as a
scalar, it will be replicated
along all three axes, namely,
'label', 'frequency' and 'time'
'label' [None or list or numpy array]
List of antenna labels that
correspond to the nax along
the 'label' axis. If the
nax=1 along the 'label' axis,
this may be set to None, else
it must be specified and must
match the nax.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nax=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nax.
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
nax=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the nax. It must be
a float and can be in seconds,
hours, days, etc.
'baseline-based' [dictionary] Contains baseline-based
instrument gain information. It has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nax1, nax2, nax3)
where ax1, ax2 and ax3 are
specified by the axes
ordering under key 'ordering'.
If there is no variations in
gains along an axis, then the
corresponding nax may be set
to 1 and the gains will be
replicated along that axis
using numpy array broadcasting.
For example, shapes (nax1,1,1),
(1,1,1), (1,nax2,nax3) are
acceptable. If specified as a
scalar, it will be replicated
along all three axes, namely,
'label', 'frequency' and 'time'
'label' [None or list or numpy array]
List of baseline labels that
correspond to the nax along
the 'label' axis. If the
nax=1 along the 'label' axis
this may be set to None, else
it must be specified and must
match the nax.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nax=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nax.
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
nax=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the nax. It must be
a float and can be in seconds,
hours, days, etc.
axes_order [None or list or numpy array] The gaintable which is read is
                stored in this axes ordering. If set to None, it will be
                stored in the order ['label', 'frequency', 'time']
Output:
gaintable [None or dictionary] If set to None, all antenna- and baseline-
based gains must be set to unity. If returned as dictionary, it
contains the loaded gains. It contains the following keys and
values:
'antenna-based' [None or dictionary] Contains antenna-based
instrument gain information. If set to None,
all antenna-based gains are set to unity.
If returned as dictionary, it has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as specified
in input axes_order
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nant, nchan, nts)
If there is no variations in
gains along an axis, then the
corresponding nax may be set
to 1 and the gains will be
replicated along that axis
using numpy array broadcasting.
For example, shapes (nant,1,1),
(1,1,1), (1,nchan,nts) are
acceptable. If specified as a
scalar, it will be replicated
along all three axes, namely,
'label', 'frequency' and 'time'
'label' [None or list or numpy array]
List of antenna labels that
correspond to nant along
the 'label' axis. If nant=1,
this may be set to None, else
it will be specified and will
match the nant.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan.
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It will
be a float and in same units as
given in input
'baseline-based' [None or dictionary] Contains baseline-based
instrument gain information. If set to None,
all baseline-based gains are set to unity.
If returned as dictionary, it has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as specified
in input axes_order
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nbl, nchan, nts)
If there is no variations in
gains along an axis, then the
corresponding nax may be set
to 1 and the gains will be
replicated along that axis
using numpy array broadcasting.
For example, shapes (nbl,1,1),
(1,1,1), (1,nchan,nts) are
acceptable. If specified as a
scalar, it will be replicated
along all three axes, namely,
'label', 'frequency' and 'time'
'label' [None or list or numpy array]
List of baseline labels that
correspond to nbl along the
'label' axis. If nbl=1 along
the 'label' axis this may be
set to None, else it will be
specified and will match nbl.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan.
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It will
be a float and in same units as
given in input
---------------------------------------------------------------------------
"""
if axes_order is None:
axes_order = ['label', 'frequency', 'time']
elif not isinstance(axes_order, (list, NP.ndarray)):
raise TypeError('axes_order must be a list')
else:
if len(axes_order) != 3:
raise ValueError('axes_order must be a three element list')
for orderkey in ['label', 'frequency', 'time']:
if orderkey not in axes_order:
raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
gaintable = {}
try:
with h5py.File(gainsfile, 'r') as fileobj:
for gainkey in fileobj:
try:
gaintable[gainkey] = {}
grp = fileobj[gainkey]
if isinstance(grp['gains'].value, (NP.float32, NP.float64, NP.complex64, NP.complex128)):
gaintable[gainkey]['gains'] = NP.asarray(grp['gains'].value).reshape(1,1,1)
elif isinstance(grp['gains'].value, NP.ndarray):
if 'ordering' in grp:
ordering = list(grp['ordering'].value)
else:
raise KeyError('Axes ordering for gains not specified')
if len(ordering) != 3:
raise ValueError('Ordering must contain three elements')
elif ('time' not in ordering) or ('label' not in ordering) or ('frequency' not in ordering):
raise ValueError('Required elements not found in ordering of instrument gains')
else:
if grp['gains'].value.ndim == 3:
transpose_order = NMO.find_list_in_list(ordering, axes_order)
gaintable[gainkey]['gains'] = NP.transpose(grp['gains'].value, axes=transpose_order)
for subkey in ['time', 'label', 'frequency']:
gaintable[gainkey][subkey] = None
if isinstance(grp[subkey].value, NP.ndarray):
if gaintable[gainkey]['gains'].shape[axes_order.index(subkey)] > 1:
if subkey not in grp:
raise KeyError('Key "{0}" not specified'.format(subkey))
else:
if not isinstance(grp[subkey].value, (list, NP.ndarray)):
raise TypeError('"{0} key must be specified as a list or numpy array'.format(subkey))
gaintable[gainkey][subkey] = NP.asarray(grp[subkey].value).ravel()
if gaintable[gainkey][subkey].size != gaintable[gainkey]['gains'].shape[axes_order.index(subkey)]:
raise ValueError('List of labels and the gains do not match in dimensions')
else:
raise TypeError('Value of key "{0}" in {1} gains must be a numpy array'.format(subkey, gainkey))
else:
raise ValueError('Gains array must be three-dimensional. Use fake dimension if there is no variation along any particular axis.')
else:
warnings.warn('Invalid data type specified for {0} instrument gains. Proceeding with defaults (unity gains)'.format(gainkey))
gaintable[gainkey]['ordering'] = axes_order
except KeyError:
warnings.warn('No info found on {0} instrument gains. Proceeding with defaults (unity gains)'.format(gainkey))
except IOError:
warnings.warn('Invalid file specified for instrument gains. Proceeding with defaults (unity gains)')
gaintable = None
if not gaintable:
gaintable = None
return gaintable
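# Editor's note: an illustrative sketch (not part of the original module)
# showing one way a minimal gain table containing only antenna-based gains
# could be written to HDF5 and read back. The file name, antenna labels,
# frequencies and times below are arbitrary placeholders.
def _example_read_gaintable(gainsfile='example_gains.hdf5'):
    nant, nchan, nts = 4, 8, 2
    with h5py.File(gainsfile, 'w') as fileobj:
        grp = fileobj.create_group('antenna-based')
        grp['ordering'] = ['label', 'frequency', 'time']
        grp['gains'] = NP.ones((nant, nchan, nts), dtype=NP.complex64)
        grp['label'] = NP.asarray(['A{0}'.format(i) for i in range(nant)])
        grp['frequency'] = 1e8 + 1e5 * NP.arange(nchan)
        grp['time'] = NP.arange(nts, dtype=NP.float64)
    return read_gaintable(gainsfile, axes_order=['label', 'frequency', 'time'])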
################################################################################
def extract_gains(gaintable, bl_labels, freq_index=None, time_index=None,
axes_order=None):
"""
---------------------------------------------------------------------------
Extract complex instrument gains for given baselines from the gain table.
Inputs:
gaintable [None or dictionary] If set to None, all antenna- and baseline-
based gains must be set to unity. If returned as dictionary, it
contains the loaded gains. It contains the following keys and
values:
'antenna-based' [None or dictionary] Contains antenna-based
instrument gain information. If set to None,
all antenna-based gains are set to unity.
If returned as dictionary, it has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nant, nchan, nts)
If there is no variations in
gains along an axis, then the
corresponding nax may be set
to 1 and the gains will be
replicated along that axis
using numpy array broadcasting.
For example, shapes (nant,1,1),
(1,1,1), (1,nchan,nts) are
acceptable. If specified as a
scalar, it will be replicated
along all three axes, namely,
'label', 'frequency' and
'time'.
'label' [None or list or numpy array]
                                            List of antenna labels that
correspond to nant along
the 'label' axis. If nant=1,
this may be set to None, else
it will be specified and will
match the nant.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It must
be a float and can be in
seconds, hours, days, etc.
'baseline-based' [None or dictionary] Contains baseline-based
instrument gain information. If set to None,
all baseline-based gains are set to unity.
If returned as dictionary, it has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nbl, nchan, nts)
If there is no variations in
gains along an axis, then the
corresponding nax may be set
to 1 and the gains will be
replicated along that axis
using numpy array broadcasting.
For example, shapes (nant,1,1),
(1,1,1), (1,nchan,nts) are
acceptable. If specified as a
scalar, it will be replicated
along all three axes, namely,
'label', 'frequency' and
'time'.
'label' [None or list or numpy array]
                                            List of baseline labels that
correspond to nbl along
the 'label' axis. If nbl=1
along the 'label' axis
this may be set to None, else
it will be specified and will
match nbl.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It must
be a float and can be in
seconds, hours, days, etc.
bl_labels [Numpy structured array tuples] Labels of antennas in the pair
used to produce the baseline vector under fields 'A2' and 'A1'
for second and first antenna respectively. The baseline vector
is obtained by position of antennas under 'A2' minus position
of antennas under 'A1'
freq_index [None, int, list or numpy array] Index (scalar) or indices
(list or numpy array) along the frequency axis at which gains
are to be extracted. If set to None, gains at all frequencies
in the gain table will be extracted.
time_index [None, int, list or numpy array] Index (scalar) or indices
(list or numpy array) along the time axis at which gains
               are to be extracted. If set to None, gains at all times in the
gain table will be extracted.
axes_order [None or list or numpy array] Axes ordering for extracted
gains. It must contain the three elements 'label',
'frequency', and 'time'. If set to None, it will be returned
in the same order as in the input gaintable.
Outputs:
[numpy array] Complex gains of shape nbl x nchan x nts for the specified
baselines, frequencies and times.
---------------------------------------------------------------------------
"""
try:
gaintable, bl_labels
except NameError:
raise NameError('Inputs gaintable and bl_labels must be specified')
blgains = NP.asarray(1.0).reshape(1,1,1)
if gaintable is not None:
a1_labels = bl_labels['A1']
a2_labels = bl_labels['A2']
for gainkey in ['antenna-based', 'baseline-based']:
if gainkey in gaintable:
temp_axes_order = ['label', 'frequency', 'time']
inp_order = gaintable[gainkey]['ordering']
temp_transpose_order = NMO.find_list_in_list(inp_order, temp_axes_order)
if NP.all(inp_order == temp_axes_order):
gains = NP.copy(gaintable[gainkey]['gains'])
else:
gains = NP.transpose(NP.copy(gaintable[gainkey]['gains']), axes=temp_transpose_order)
if freq_index is None:
freq_index = NP.arange(gains.shape[1])
elif isinstance(freq_index, (int,list,NP.ndarray)):
freq_index = NP.asarray(freq_index).ravel()
if NP.any(freq_index >= gains.shape[1]):
raise IndexError('Input freq_index cannot exceed the frequency dimensions in the gain table')
if time_index is None:
time_index = NP.arange(gains.shape[2])
elif isinstance(time_index, (int,list,NP.ndarray)):
time_index = NP.asarray(time_index).ravel()
if NP.any(time_index >= gains.shape[2]):
raise IndexError('Input time_index cannot exceed the time dimensions in the gain table')
if gains.shape[0] == 1:
blgains = blgains * gains[:,freq_index,time_index].reshape(1,freq_index.size,time_index.size)
else:
labels = gaintable[gainkey]['label']
if gainkey == 'antenna-based':
ind1 = NMO.find_list_in_list(labels, a1_labels)
ind2 = NMO.find_list_in_list(labels, a2_labels)
if NP.sum(ind1.mask) > 0:
raise IndexError('Some antenna gains could not be found')
if NP.sum(ind2.mask) > 0:
raise IndexError('Some antenna gains could not be found')
blgains = blgains * gains[NP.ix_(ind2,freq_index,time_index)].reshape(ind2.size,freq_index.size,time_index.size) * gains[NP.ix_(ind1,freq_index,time_index)].conj().reshape(ind1.size,freq_index.size,time_index.size)
else:
labels_conj = [tuple(reversed(label)) for label in labels]
labels_conj = NP.asarray(labels_conj, dtype=labels.dtype)
labels_conj_appended = NP.concatenate((labels, labels_conj), axis=0)
gains_conj_appended = NP.concatenate((gains, gains.conj()), axis=0)
ind = NMO.find_list_in_list(labels_conj_appended, bl_labels)
selected_gains = gains_conj_appended[NP.ix_(ind.compressed(),freq_index,time_index)]
if ind.compressed().size == 1:
selected_gains = selected_gains.reshape(NP.sum(~ind.mask),freq_index.size,time_index.size)
blgains[~ind.mask, ...] = blgains[~ind.mask, ...] * selected_gains
if axes_order is None:
axes_order = inp_order
elif not isinstance(axes_order, (list, NP.ndarray)):
raise TypeError('axes_order must be a list')
else:
if len(axes_order) != 3:
raise ValueError('axes_order must be a three element list')
for orderkey in ['label', 'frequency', 'time']:
if orderkey not in axes_order:
raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
transpose_order = NMO.find_list_in_list(inp_order, axes_order)
blgains = NP.transpose(blgains, axes=transpose_order)
return blgains
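# Editor's note: an illustrative sketch (not part of the original module).
# It extracts per-baseline gains from a gain table previously loaded with
# read_gaintable(); the antenna labels forming the baselines are placeholders
# and must match the labels stored in the gain table.
def _example_extract_gains(gaintable):
    bl_labels = NP.asarray([('A1', 'A0'), ('A2', 'A0')],
                           dtype=[('A2', '|S2'), ('A1', '|S2')])
    return extract_gains(gaintable, bl_labels, freq_index=None,
                         time_index=None,
                         axes_order=['label', 'frequency', 'time'])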
################################################################################
def hexagon_generator(spacing, n_total=None, n_side=None, orientation=None,
center=None):
"""
------------------------------------------------------------------------
Generate a grid of baseline locations filling a regular hexagon.
Primarily intended for HERA experiment.
Inputs:
spacing [scalar] positive scalar specifying the spacing between
antennas. Must be specified, no default.
n_total [scalar] positive integer specifying the total number of
antennas to be placed in the hexagonal array. This value
will be checked if it valid for a regular hexagon. If
n_total is specified, n_side must not be specified.
Default = None.
n_side [scalar] positive integer specifying the number of antennas
on the side of the hexagonal array. If n_side is specified,
n_total should not be specified. Default = None
orientation [scalar] counter-clockwise angle (in degrees) by which the
principal axis of the hexagonal array is to be rotated.
Default = None (means 0 degrees)
center [2-element list or numpy array] specifies the center of the
array. Must be in the same units as spacing. The hexagonal
array will be centered on this position.
Outputs:
Two element tuple with these elements in the following order:
xy [2-column array] x- and y-locations. x is in the first
column, y is in the second column. Number of xy-locations
is equal to the number of rows which is equal to n_total
id [numpy array of string] unique antenna identifier. Numbers
from 0 to n_antennas-1 in string format.
Notes:
If n_side is the number of antennas on the side of the hexagon, then
n_total = 3*n_side**2 - 3*n_side + 1
------------------------------------------------------------------------
"""
try:
spacing
except NameError:
raise NameError('No spacing provided.')
if not isinstance(spacing, (int, float)):
raise TypeError('spacing must be scalar value')
if spacing <= 0:
raise ValueError('spacing must be positive')
if orientation is not None:
if not isinstance(orientation, (int,float)):
raise TypeError('orientation must be a scalar')
if center is not None:
if not isinstance(center, (list, NP.ndarray)):
raise TypeError('center must be a list or numpy array')
center = NP.asarray(center)
if center.size != 2:
raise ValueError('center should be a 2-element vector')
center = center.reshape(1,-1)
if (n_total is None) and (n_side is None):
raise NameError('n_total or n_side must be provided')
elif (n_total is not None) and (n_side is not None):
raise ValueError('Only one of n_total or n_side must be specified.')
elif n_total is not None:
if not isinstance(n_total, int):
raise TypeError('n_total must be an integer')
if n_total <= 0:
raise ValueError('n_total must be positive')
else:
if not isinstance(n_side, int):
raise TypeError('n_side must be an integer')
if n_side <= 0:
raise ValueError('n_side must be positive')
if n_total is not None:
sqroots = NP.roots([3.0, -3.0, 1.0-n_total])
valid_ind = NP.logical_and(sqroots.real >= 1, sqroots.imag == 0.0)
if NP.any(valid_ind):
sqroot = sqroots[valid_ind]
else:
raise ValueError('No valid root found for the quadratic equation with the specified n_total')
n_side = NP.round(sqroot).astype(NP.int)
if (3*n_side**2 - 3*n_side + 1 != n_total):
raise ValueError('n_total is not a valid number for a hexagonal array')
else:
n_total = 3*n_side**2 - 3*n_side + 1
xref = NP.arange(2*n_side-1, dtype=NP.float)
xloc, yloc = [], []
for i in range(1,n_side):
x = xref[:-i] + i * NP.cos(NP.pi/3) # Select one less antenna each time and displace
y = i*NP.sin(NP.pi/3) * NP.ones(2*n_side-1-i)
xloc += x.tolist() * 2 # Two lists, one for the top and the other for the bottom
yloc += y.tolist() # y-locations of the top list
yloc += (-y).tolist() # y-locations of the bottom list
xloc += xref.tolist() # Add the x-locations of central line of antennas
yloc += [0.0] * int(2*n_side-1) # Add the y-locations of central line of antennas
if len(xloc) != len(yloc):
raise ValueError('Sizes of x- and y-locations do not agree')
xy = zip(xloc, yloc)
if len(xy) != n_total:
raise ValueError('Sizes of x- and y-locations do not agree with n_total')
xy = NP.asarray(xy)
xy = xy - NP.mean(xy, axis=0, keepdims=True) # Shift the center to origin
if orientation is not None: # Perform any rotation
angle = NP.radians(orientation)
rot_matrix = NP.asarray([[NP.cos(angle), -NP.sin(angle)],
[NP.sin(angle), NP.cos(angle)]])
xy = NP.dot(xy, rot_matrix.T)
xy *= spacing # Scale by the spacing
if center is not None: # Shift the center
xy += center
return (NP.asarray(xy), map(str, range(n_total)))
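# Editor's note: a minimal usage sketch (not part of the original module).
# A HERA-19 style layout: 19 antennas (n_side=3) on a 14.6 m grid; the
# spacing and orientation are placeholders.
def _example_hexagon_generator():
    xy, ant_ids = hexagon_generator(14.6, n_total=19, orientation=0.0,
                                    center=[0.0, 0.0])
    return xy, ant_ids  # xy has shape (19, 2); ant_ids are '0' .. '18'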
################################################################################
def rectangle_generator(spacing, n_side, orientation=None, center=None):
"""
------------------------------------------------------------------------
Generate a grid of baseline locations filling a rectangular array.
Primarily intended for HIRAX, CHIME and PAPER experiments
Inputs:
spacing [2-element list or numpy array] positive integers specifying
the spacing between antennas. Must be specified, no default.
n_side [2-element list or numpy array] positive integers specifying
the number of antennas on each side of the rectangular array.
                At least one value should be specified, no default.
orientation [scalar] counter-clockwise angle (in degrees) by which the
principal axis of the rectangular array is to be rotated.
Default = None (means 0 degrees)
center [2-element list or numpy array] specifies the center of the
array. Must be in the same units as spacing. The rectangular
array will be centered on this position.
Outputs:
Two element tuple with these elements in the following order:
xy [2-column array] x- and y-locations. x is in the first
column, y is in the second column. Number of xy-locations
is equal to the number of rows which is equal to n_total
id [numpy array of string] unique antenna identifier. Numbers
from 0 to n_antennas-1 in string format.
Notes:
------------------------------------------------------------------------
"""
try:
spacing
except NameError:
raise NameError('No spacing provided.')
if spacing is not None:
if not isinstance(spacing, (int, float, list, NP.ndarray)):
raise TypeError('spacing must be a scalar or list/numpy array')
spacing = NP.asarray(spacing)
if spacing.size < 2:
spacing = NP.resize(spacing,(1,2))
if NP.all(NP.less_equal(spacing,NP.zeros((1,2)))):
raise ValueError('spacing must be positive')
if orientation is not None:
if not isinstance(orientation, (int,float)):
raise TypeError('orientation must be a scalar')
if center is not None:
if not isinstance(center, (list, NP.ndarray)):
raise TypeError('center must be a list or numpy array')
center = NP.asarray(center)
if center.size != 2:
raise ValueError('center should be a 2-element vector')
center = center.reshape(1,-1)
if n_side is None:
        raise NameError('At least one value of n_side must be provided')
else:
if not isinstance(n_side, (int, float, list, NP.ndarray)):
raise TypeError('n_side must be a scalar or list/numpy array')
n_side = NP.asarray(n_side)
if n_side.size < 2:
n_side = NP.resize(n_side,(1,2))
if NP.all(NP.less_equal(n_side,NP.zeros((1,2)))):
raise ValueError('n_side must be positive')
    n_total = NP.prod(n_side, dtype=int)  # int (not uint8) avoids overflow for large arrays
xn,yn = NP.hsplit(n_side,2)
xn = NP.asscalar(xn)
yn = NP.asscalar(yn)
xs,ys = NP.hsplit(spacing,2)
xs = NP.asscalar(xs)
ys = NP.asscalar(ys)
n_total = xn*yn
x = NP.linspace(0, xn-1, xn)
x = x - NP.mean(x)
x = x*xs
y = NP.linspace(0, yn-1, yn)
y = y - NP.mean(y)
y = y*ys
xv, yv = NP.meshgrid(x,y)
xy = NP.hstack((xv.reshape(-1,1),yv.reshape(-1,1)))
if len(xy) != n_total:
raise ValueError('Sizes of x- and y-locations do not agree with n_total')
if orientation is not None: # Perform any rotation
angle = NP.radians(orientation)
rot_matrix = NP.asarray([[NP.cos(angle), -NP.sin(angle)], [NP.sin(angle), NP.cos(angle)]])
xy = NP.dot(xy, rot_matrix.T)
if center is not None: # Shift the center
xy += center
return (NP.asarray(xy), map(str, range(n_total)))
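# Editor's note: a minimal usage sketch (not part of the original module).
# A small rectangular grid of 4 x 3 antennas with 6.5 m and 8.5 m spacings
# along the two axes; all numbers are placeholders.
def _example_rectangle_generator():
    xy, ant_ids = rectangle_generator([6.5, 8.5], [4, 3], orientation=0.0,
                                      center=[0.0, 0.0])
    return xy, ant_ids  # xy has shape (12, 2), centered on (0, 0)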
################################################################################
def circular_antenna_array(antsize, minR, maxR=None):
"""
---------------------------------------------------------------------------
Create antenna layout in a circular ring of minimum and maximum radius with
antennas of a given size
Inputs:
antsize [scalar] Antenna size. Critical to determining number of antenna
elements that can be placed on a circle. No default.
minR [scalar] Minimum radius of the circular ring. Must be in same
units as antsize. No default. Must be greater than 0.5*antsize.
maxR [scalar] Maximum radius of circular ring. Must be >= minR.
Default=None means maxR is set equal to minR.
Outputs:
xy [2-column numpy array] Antenna locations in the same units as
antsize returned as a 2-column numpy array where the number of
rows equals the number of antenna locations generated and x,
and y locations make the two columns.
---------------------------------------------------------------------------
"""
try:
antsize, minR
except NameError:
raise NameError('antsize, and minR must be specified')
if (antsize is None) or (minR is None):
raise ValueError('antsize and minR cannot be NoneType')
if not isinstance(antsize, (int, float)):
raise TypeError('antsize must be a scalar')
if antsize <= 0.0:
raise ValueError('antsize must be positive')
if not isinstance(minR, (int, float)):
raise TypeError('minR must be a scalar')
if minR <= 0.0:
raise ValueError('minR must be positive')
if minR < 0.5*antsize:
minR = 0.5*antsize
if maxR is None:
maxR = minR
if not isinstance(maxR, (int, float)):
raise TypeError('maxR must be a scalar')
elif maxR < minR:
maxR = minR
if maxR - minR < antsize:
radii = minR + NP.zeros(1)
else:
radii = minR + antsize * NP.arange((maxR-minR)/antsize)
nants = 2 * NP.pi * radii / antsize
nants = nants.astype(NP.int)
x = [(radii[i] * NP.cos(2*NP.pi*NP.arange(nants[i])/nants[i])).tolist() for i in range(radii.size)]
y = [(radii[i] * NP.sin(2*NP.pi*NP.arange(nants[i])/nants[i])).tolist() for i in range(radii.size)]
xpos = [xi for sublist in x for xi in sublist]
ypos = [yi for sublist in y for yi in sublist]
x = NP.asarray(xpos)
y = NP.asarray(ypos)
xy = NP.hstack((x.reshape(-1,1), y.reshape(-1,1)))
return (xy, map(str, range(NP.sum(nants))))
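# Editor's note: a minimal usage sketch (not part of the original module).
# A single ring of 14 m elements at a 50 m radius; both numbers are
# placeholders.
def _example_circular_antenna_array():
    xy, ant_ids = circular_antenna_array(14.0, 50.0)
    return xy, ant_ids  # one ring; its circumference sets how many elements fit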
################################################################################
def baseline_generator(antenna_locations, ant_label=None, ant_id=None,
auto=False, conjugate=False):
"""
---------------------------------------------------------------------------
Generate baseline from antenna locations.
Inputs:
antenna_locations: List of tuples containing antenna coordinates,
or list of instances of class Point containing
antenna coordinates, or Numpy array (Nx3) array
with each row specifying an antenna location.
Input keywords:
ant_label [list of strings] Unique string identifier for each
antenna. Default = None. If None provided,
antennas will be indexed by an integer starting
from 0 to N(ants)-1
ant_id [list of integers] Unique integer identifier for each
antenna. Default = None. If None provided,
antennas will be indexed by an integer starting
from 0 to N(ants)-1
auto: [Default=False] If True, compute zero spacings of
antennas with themselves.
conjugate: [Default=False] If True, compute conjugate
baselines.
Output:
baseline_locations: Baseline locations in the same data type as
antenna locations (list of tuples, list of
instances of class Point or Numpy array of size
Nb x 3 with each row specifying one baseline
vector)
antpair_labels [Numpy structured array tuples] Labels of
antennas in the pair used to produce the
baseline vector under fields 'A2' and 'A1' for
second and first antenna respectively. The
baseline vector is obtained by position of
antennas under 'A2' minus position of antennas
under 'A1'
antpair_ids [Numpy structured array tuples] IDs of antennas
in the pair used to produce the baseline vector
under fields 'A2' and 'A1' for second and first
antenna respectively. The baseline vector is
obtained by position of antennas under 'A2'
minus position of antennas under 'A1'
-------------------------------------------------------------------
"""
try:
antenna_locations
except NameError:
warnings.warn('No antenna locations supplied. Returning from baseline_generator()')
return None
inp_type = 'tbd'
if not isinstance(antenna_locations, NP.ndarray):
if isinstance(antenna_locations, list):
if isinstance(antenna_locations[0], GEOM.Point):
inp_type = 'loo' # list of objects
elif isinstance(antenna_locations[0], tuple):
inp_type = 'lot' # list of tuples
antenna_locations = [(tuple(loc) if len(loc) == 3 else (tuple([loc[0],0.0,0.0]) if len(loc) == 1 else (tuple([loc[0],loc[1],0.0]) if len(loc) == 2 else (tuple([loc[0],loc[1],loc[2]]))))) for loc in antenna_locations if len(loc) != 0] # Remove empty tuples and validate the data range and data type for antenna locations. Force it to have three components for every antenna location.
elif isinstance(antenna_locations, GEOM.Point):
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return GEOM.Point()
elif isinstance(antenna_locations, tuple):
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return (0.0,0.0,0.0)
else:
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return (0.0,0.0,0.0)
else:
inp_type = 'npa' # A numpy array
if antenna_locations.shape[0] == 1:
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return NP.zeros(1,3)
else:
if antenna_locations.shape[1] > 3:
antenna_locations = antenna_locations[:,:3]
elif antenna_locations.shape[1] < 3:
antenna_locations = NP.hstack((antenna_locations, NP.zeros((antenna_locations.shape[0],3-antenna_locations.shape[1]))))
if isinstance(antenna_locations, list):
num_ants = len(antenna_locations)
else:
num_ants = antenna_locations.shape[0]
if ant_label is not None:
if isinstance(ant_label, list):
if len(ant_label) != num_ants:
raise ValueError('Dimensions of ant_label and antenna_locations do not match.')
elif isinstance(ant_label, NP.ndarray):
if ant_label.size != num_ants:
raise ValueError('Dimensions of ant_label and antenna_locations do not match.')
ant_label = ant_label.tolist()
else:
ant_label = ['{0:0d}'.format(i) for i in xrange(num_ants)]
if ant_id is not None:
if isinstance(ant_id, list):
if len(ant_id) != num_ants:
raise ValueError('Dimensions of ant_id and antenna_locations do not match.')
elif isinstance(ant_id, NP.ndarray):
if ant_id.size != num_ants:
raise ValueError('Dimensions of ant_id and antenna_locations do not match.')
ant_id = ant_id.tolist()
else:
ant_id = range(num_ants)
if inp_type == 'loo': # List of objects
if auto:
baseline_locations = [antenna_locations[j]-antenna_locations[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
else:
baseline_locations = [antenna_locations[j]-antenna_locations[i] for i in range(0,num_ants) for j in range(0,num_ants) if j > i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
if conjugate:
baseline_locations += [antenna_locations[j]-antenna_locations[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
# antpair_labels += [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_labels += [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_ids += [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
elif inp_type == 'lot': # List of tuples
if auto:
baseline_locations = [tuple((antenna_locations[j][0]-antenna_locations[i][0], antenna_locations[j][1]-antenna_locations[i][1], antenna_locations[j][2]-antenna_locations[i][2])) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
else:
baseline_locations = [tuple((antenna_locations[j][0]-antenna_locations[i][0], antenna_locations[j][1]-antenna_locations[i][1], antenna_locations[j][2]-antenna_locations[i][2])) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
if conjugate:
baseline_locations += [tuple((antenna_locations[j][0]-antenna_locations[i][0], antenna_locations[j][1]-antenna_locations[i][1], antenna_locations[j][2]-antenna_locations[i][2])) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
# antpair_labels += [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_labels += [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_ids += [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
elif inp_type == 'npa': # Numpy array
if auto:
baseline_locations = [antenna_locations[j,:]-antenna_locations[i,:] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
else:
baseline_locations = [antenna_locations[j,:]-antenna_locations[i,:] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
if conjugate:
baseline_locations += [antenna_locations[j,:]-antenna_locations[i,:] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
# antpair_labels += [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_labels += [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_ids += [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
baseline_locations = NP.asarray(baseline_locations)
maxlen = max(len(albl) for albl in ant_label)
antpair_labels = NP.asarray(antpair_labels, dtype=[('A2', '|S{0:0d}'.format(maxlen)), ('A1', '|S{0:0d}'.format(maxlen))])
antpair_ids = NP.asarray(antpair_ids, dtype=[('A2', int), ('A1', int)])
return baseline_locations, antpair_labels, antpair_ids
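# Editor's note: a minimal usage sketch (not part of the original module).
# Baselines of a 3-element east-west array without autocorrelations or
# conjugate baselines; the antenna positions (in metres) are placeholders.
def _example_baseline_generator():
    antpos = NP.asarray([[0.0, 0.0, 0.0],
                         [14.6, 0.0, 0.0],
                         [29.2, 0.0, 0.0]])
    bl, bl_labels, bl_ids = baseline_generator(antpos, auto=False,
                                               conjugate=False)
    return bl, bl_labels, bl_ids  # 3 baselines: 14.6 m (twice) and 29.2 m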
#################################################################################
def uniq_baselines(baseline_locations, redundant=None):
"""
---------------------------------------------------------------------------
Identify unique, redundant or non-redundant baselines from a given set of
baseline locations.
Inputs:
baseline_locations [2- or 3-column numpy array] Each row of the array
specifies a baseline vector from which the required
set of baselines have to be identified
redundant [None or boolean] If set to None (default), all the
unique baselines including redundant and non-redundant
baselines are returned. If set to True, only redundant
baselines that occur more than once are returned. If set
to False, only non-redundant baselines that occur
exactly once are returned.
Output:
4-element tuple with the selected baselines, their unique indices in the
    input, their count and the indices of all occurrences of each unique
baseline. The first element of this tuple is a 3-column numpy array
which is a subset of baseline_locations containing the requested type of
baselines. The second element of the tuple contains the selected indices
of the input array from which the first element in the tuple is determined
relative to the input array. The third element of the tuple contains the
count of these selected baselines. In case of redundant and unique
baselines, the order of repeated baselines does not matter and any one of
those baselines could be returned without preserving the order. The fourth
element in the tuple contains a list of lists where each element in the
top level list corresponds to a unique baseline and consists of indices
of all occurrences of input baselines redundant with this unique baseline
---------------------------------------------------------------------------
"""
try:
baseline_locations
except NameError:
raise NameError('baseline_locations not provided')
if not isinstance(baseline_locations, NP.ndarray):
raise TypeError('baseline_locations must be a numpy array')
if redundant is not None:
if not isinstance(redundant, bool):
raise TypeError('keyword "redundant" must be set to None or a boolean value')
blshape = baseline_locations.shape
if blshape[1] > 3:
baseline_locations = baseline_locations[:,:3]
elif blshape[1] < 3:
baseline_locations = NP.hstack((baseline_locations, NP.zeros((blshape[0],3-blshape[1]))))
blo = NP.angle(baseline_locations[:,0] + 1j * baseline_locations[:,1], deg=True)
blo[blo >= 180.0] -= 180.0
blo[blo < 0.0] += 180.0
bll = NP.sqrt(NP.sum(baseline_locations**2, axis=1))
blza = NP.degrees(NP.arccos(baseline_locations[:,2] / bll))
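    # Build a string key for every baseline from its length (2 decimal places) and its
    # zenith and orientation angles (scaled by 3600 before formatting); baselines that
    # end up with identical keys are treated as redundant copies by NP.unique below.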
blstr = ['{0[0]:.2f}_{0[1]:.3f}_{0[2]:.3f}'.format(lo) for lo in zip(bll,3.6e3*blza,3.6e3*blo)]
uniq_blstr, ind, invind = NP.unique(blstr, return_index=True, return_inverse=True) ## if numpy.__version__ < 1.9.0
# uniq_blstr, ind, invind, frequency = NP.unique(blstr, return_index=True, return_inverse=True, return_counts=True) ## if numpy.__version__ >= 1.9.0
count_blstr = [(ubstr,blstr.count(ubstr)) for ubstr in uniq_blstr] ## if numpy.__version__ < 1.9.0
if redundant is None:
retind = NP.copy(ind)
counts = [tup[1] for tup in count_blstr]
counts = NP.asarray(counts)
else:
if not redundant:
## if numpy.__version__ < 1.9.0
non_redn_ind = [i for i,tup in enumerate(count_blstr) if tup[1] == 1]
retind = ind[NP.asarray(non_redn_ind)]
counts = NP.ones(retind.size)
else:
## if numpy.__version__ < 1.9.0
redn_ind_counts = [(i,tup[1]) for i,tup in enumerate(count_blstr) if tup[1] > 1]
redn_ind, counts = zip(*redn_ind_counts)
retind = ind[NP.asarray(redn_ind)]
counts = NP.asarray(counts)
allinds_where_found = NMO.find_all_occurrences_list1_in_list2(invind[retind], invind)
return (baseline_locations[retind,:], retind, counts, allinds_where_found)
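
def _example_uniq_baselines():
    # Illustrative usage sketch (not from the original module): the toy array below
    # holds two copies of a 14.6 m east-west baseline and one 29.2 m baseline, so
    # uniq_baselines() finds two unique baselines with counts 2 and 1.
    bl = NP.asarray([[14.6, 0.0, 0.0],
                     [29.2, 0.0, 0.0],
                     [14.6, 0.0, 0.0]])
    ubl, uind, counts, allinds = uniq_baselines(bl, redundant=None)
    # ubl is a (2, 3) array of unique baselines; allinds lists, for each unique
    # baseline, the indices of all of its occurrences in the input array
    return ubl, uind, counts, allinds
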
#################################################################################
def getBaselineInfo(inpdict):
"""
---------------------------------------------------------------------------
Generate full baseline info from a given layout and return information
about redundancy and the mapping between unique and redundant baselines
Input:
inpdict [dictionary] It contains the following keys and values:
'array' [dictionary] It contains the following keys and values:
'redundant' [boolean] If this key is present, it says
whether the array could be redundant (true)
or not (false). If key is absent, this
value is assumed to be true. When it is set
                                        to true, redundancy is checked; otherwise
                                        it is not. It does not state whether the
                                        array is actually redundant; it only
                                        controls whether the redundancy check is
                                        performed
'layout' [string] Preset array layouts mutually
exclusive to antenna file. Only one of
these must be specified. Accepted
values are 'MWA-128T', 'HERA-7', 'HERA-19',
'HERA-37', 'HERA-61', 'HERA-91',
'HERA-127', 'HERA-169', 'HERA-217',
'HERA-271', 'HERA-331', 'PAPER-64',
'PAPER-112', 'HIRAX-1024', 'CHIME', 'CIRC',
or None (if layout file is specified).
'file' [string] File containing antenna locations
parsed according to info in parser (see
below). If preset layout is specified, this
must be set to None.
'filepathtype'
[string] Accepted values are 'default' (if
layout file can be found in prisim path,
namely, prisim/data/array_layouts folder)
and 'custom'. If set to 'default', only
filename should be specified in file and it
will be searched in the default
array_layouts folder
prisim/data/array_layouts.
If set to 'custom' then the full path
to the file must be specified.
'parser' [dictionary] Will be used for parsing the
file if file is specified for array layout.
It contains the following keys and values:
'comment' [string] Character used to
denote commented lines to be
ignored. Default=None ('#')
'delimiter' [string] Delimiter string.
Accepted values are whitespace
(default or None), ',' and '|'
                                        'data_start'
[integer] Line index for the
start of data not counting
comment or blank lines. A line
with only whitespace is
considered blank. It is
required. No defaults.
Indexing starts from 0
'data_end' [integer] Line index for the end
of data not counting comment or
blank lines. This value can be
negative to count from the end.
Default is None (all the way to
end of file). Indexing starts
from 0.
'header_start'
[integer] Line index for the
header line not counting comment
or blank lines. A line with only
whitespace is considered blank.
Must be provided. No defaults
'label' [string] String in the header
containing antenna labels. If
set to None (default), antenna
labels will be automatically
assigned. e.g. of some accepted
values are None, 'label', 'id',
'antid', etc. This must be found
in the header
'east' [string] String specifying East
coordinates in the header and
data. Must be provided. No
defaults.
'north' [string] String specifying North
coordinates in the header and
data. Must be provided. No
defaults.
'up' [string] String specifying
elevation coordinates in the
header and data. Must be
provided. No defaults.
'minR' [string] Minimum radius of circular ring.
Applies only when layout = 'CIRC'
'maxR' [string] Maximum radius of circular ring.
Applies only when layout = 'CIRC'
'rms_tgtplane'
[float] Perturbation of antenna positions
(in m) in tangent plane. Default=0.0
'rms_elevation'
[float] Perturbation of antenna positions
(in m) in perpendicular to tangent plane.
Default=0.0
'seed' [integer] Random number seed for antenna
position perturbations. Default=None means
no fixed seed
'baseline' [dictionary] Parameters specifying baseline
selection criteria. It consists of the following keys
and values:
'min' [float] Minimum baseline in distance
units (m). Default=None (0.0)
'max' [float] Maximum baseline in distance
units (m). Default=None (max baseline)
'direction' [string] Baseline vector directions to
select. Default=None (all directions).
Other accepted values are 'E' (east)
'SE' (south-east), 'NE' (north-east),
and 'N' (north). Multiple values from
this accepted list can be specified
as a list of strings. e.g., ['N', 'E'],
['NE', 'SE', 'E'], ['SE', 'E', 'NE', 'N']
which is equivalent to None, etc.
'skyparm' [dictionary] Sky model specification. It contains the
following keys and values:
'model' [string] Sky model. Accepted values
are 'csm' (NVSS+SUMSS point sources),
'dsm' (diffuse emission), 'asm' (both
point sources and diffuse emission),
                                        'sumss' (SUMSS catalog), 'nvss' (NVSS
catalog), 'mss' (Molonglo Sky Survey),
'gleam' (GLEAM catalog), 'custom'
(user-defined catalog), 'usm' (uniform
sky model), 'mwacs' (MWACS catalog),
                                        'HI_monopole' (global EoR), 'HI_cube' (HI
cube from external simulations), and
'HI_fluctuations' (HI fluctuations with
                                        the global mean signal removed). If set to
                                        'HI_monopole' or 'monopole', the orientation
of the baseline vector does not matter
and only unique baseline lengths will be
selected if value under 'redundant' key is
set to True.
Output:
Dictionary containing the following keys and values.
'bl' [numpy array] Baseline vectors (unique ones or all depending on
value in key 'redundant'). It is of shape nbl x 3 and will
consist of unique baselines if value under key 'redundant' was
set to True. Otherwise, redundancy will not be checked and all
baselines will be returned.
'label' [numpy recarray] A unique label of each of the baselines.
Shape is nbl where each element is a recarray under fields 'A1'
(first antenna label) and 'A2' (second antenna label)
'id' [numpy recarray] A unique identifier of each of the baselines.
Shape is nbl where each element is a recarray under fields 'A1'
(first antenna id) and 'A2' (second antenna id)
'redundancy'
[boolean] If the array was originally found to be made of unique
baselines (False) or redundant baselines were found (True). Even
if set to False, the baselines may still be redundant because
redundancy may never have been checked if value under key
'redundant' was set to False
'groups'
[dictionary] Contains the grouping of unique baselines and the
redundant baselines as numpy recarray under each unique baseline
category/flavor. It contains as keys the labels (tuple of A1, A2)
of unique baselines and the value under each of these keys is a
list of baseline labels that are redundant under that category
'reversemap'
[dictionary] Contains the baseline category for each baseline.
The keys are baseline labels as tuple and the value under each
key is the label of the unique baseline category that it falls
under.
'layout_info'
[dictionary] Contains the antenna layout information with the
following keys and values:
'positions' [numpy array] Antenna locations with shape nant x 3
'labels' [numpy array of strings] Antenna labels of size nant
                'ids'       [numpy array of integers] Antenna IDs of size nant
'coords' [string] Coordinate system in which antenna locations
are specified. Currently only returns 'ENU' for East-
North-Up coordinate system
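
    Illustrative example (a sketch assembled from the information above, not taken
    from the original module): a preset HERA-19 layout, no position perturbations,
    all baselines, and the 'csm' point-source sky model.

    inpdict = {'array': {'redundant': True, 'layout': 'HERA-19', 'file': None,
                         'filepathtype': None, 'parser': None, 'minR': None,
                         'maxR': None, 'rms_tgtplane': 0.0, 'rms_elevation': 0.0,
                         'seed': 100},
               'baseline': {'min': None, 'max': None, 'direction': None},
               'skyparm': {'model': 'csm'}}
    blinfo = getBaselineInfo(inpdict)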
---------------------------------------------------------------------------
"""
try:
inpdict
except NameError:
raise NameError('Input inpdict must be specified')
if not isinstance(inpdict, dict):
raise TypeError('Input inpdict must be a dictionary')
if 'array' in inpdict:
if 'redundant' in inpdict['array']:
array_is_redundant = inpdict['array']['redundant']
else:
array_is_redundant = True
else:
raise KeyError('Key "array" not found in input inpdict')
sky_str = inpdict['skyparm']['model']
use_HI_monopole = False
if sky_str == 'HI_monopole':
use_HI_monopole = True
antenna_file = inpdict['array']['file']
array_layout = inpdict['array']['layout']
minR = inpdict['array']['minR']
maxR = inpdict['array']['maxR']
antpos_rms_tgtplane = inpdict['array']['rms_tgtplane']
antpos_rms_elevation = inpdict['array']['rms_elevation']
antpos_rms_seed = inpdict['array']['seed']
if antpos_rms_seed is None:
antpos_rms_seed = NP.random.randint(1, high=100000)
elif isinstance(antpos_rms_seed, (int,float)):
antpos_rms_seed = int(NP.abs(antpos_rms_seed))
else:
raise ValueError('Random number seed must be a positive integer')
minbl = inpdict['baseline']['min']
maxbl = inpdict['baseline']['max']
bldirection = inpdict['baseline']['direction']
if (antenna_file is None) and (array_layout is None):
raise ValueError('One of antenna array file or layout must be specified')
if (antenna_file is not None) and (array_layout is not None):
raise ValueError('Only one of antenna array file or layout must be specified')
if antenna_file is not None:
if not isinstance(antenna_file, str):
raise TypeError('Filename containing antenna array elements must be a string')
if inpdict['array']['filepathtype'] == 'default':
antenna_file = prisim_path+'data/array_layouts/'+antenna_file
antfile_parser = inpdict['array']['parser']
if 'comment' in antfile_parser:
comment = antfile_parser['comment']
if comment is None:
comment = '#'
elif not isinstance(comment, str):
raise TypeError('Comment expression must be a string')
else:
comment = '#'
if 'delimiter' in antfile_parser:
delimiter = antfile_parser['delimiter']
if delimiter is not None:
if not isinstance(delimiter, str):
raise TypeError('Delimiter expression must be a string')
else:
delimiter = ' '
else:
delimiter = ' '
if 'data_start' in antfile_parser:
data_start = antfile_parser['data_start']
if not isinstance(data_start, int):
raise TypeError('data_start parameter must be an integer')
else:
raise KeyError('data_start parameter not provided')
if 'data_end' in antfile_parser:
data_end = antfile_parser['data_end']
if data_end is not None:
if not isinstance(data_end, int):
raise TypeError('data_end parameter must be an integer')
else:
data_end = None
if 'header_start' in antfile_parser:
header_start = antfile_parser['header_start']
if not isinstance(header_start, int):
raise TypeError('header_start parameter must be an integer')
else:
raise KeyError('header_start parameter not provided')
if 'label' not in antfile_parser:
antfile_parser['label'] = None
elif antfile_parser['label'] is not None:
antfile_parser['label'] = str(antfile_parser['label'])
if 'east' not in antfile_parser:
raise KeyError('Keyword for "east" coordinates not provided')
else:
if not isinstance(antfile_parser['east'], str):
raise TypeError('Keyword for "east" coordinates must be a string')
if 'north' not in antfile_parser:
raise KeyError('Keyword for "north" coordinates not provided')
else:
if not isinstance(antfile_parser['north'], str):
raise TypeError('Keyword for "north" coordinates must be a string')
if 'up' not in antfile_parser:
raise KeyError('Keyword for "up" coordinates not provided')
else:
if not isinstance(antfile_parser['up'], str):
raise TypeError('Keyword for "up" coordinates must be a string')
try:
ant_info = ascii.read(antenna_file, comment=comment, delimiter=delimiter, header_start=header_start, data_start=data_start, data_end=data_end, guess=False)
except IOError:
raise IOError('Could not open file containing antenna locations.')
if (antfile_parser['east'] not in ant_info.colnames) or (antfile_parser['north'] not in ant_info.colnames) or (antfile_parser['up'] not in ant_info.colnames):
raise KeyError('One of east, north, up coordinates incompatible with the table in antenna_file')
if antfile_parser['label'] is not None:
ant_label = ant_info[antfile_parser['label']].data.astype('str')
else:
ant_label = NP.arange(len(ant_info)).astype('str')
east = ant_info[antfile_parser['east']].data
north = ant_info[antfile_parser['north']].data
elev = ant_info[antfile_parser['up']].data
if (east.dtype != NP.float) or (north.dtype != NP.float) or (elev.dtype != NP.float):
raise TypeError('Antenna locations must be of floating point type')
ant_locs = NP.hstack((east.reshape(-1,1), north.reshape(-1,1), elev.reshape(-1,1)))
else:
if array_layout not in ['MWA-128T', 'HERA-7', 'HERA-19', 'HERA-37', 'HERA-61', 'HERA-91', 'HERA-127', 'HERA-169', 'HERA-217', 'HERA-271', 'HERA-331', 'PAPER-64', 'PAPER-112', 'HIRAX-1024', 'CHIME', 'CIRC']:
raise ValueError('Invalid array layout specified')
if array_layout == 'MWA-128T':
ant_info = NP.loadtxt(prisim_path+'data/array_layouts/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt', skiprows=6, comments='#', usecols=(0,1,2,3))
ant_label = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
elif array_layout == 'HERA-7':
ant_locs, ant_label = hexagon_generator(14.6, n_total=7)
elif array_layout == 'HERA-19':
ant_locs, ant_label = hexagon_generator(14.6, n_total=19)
elif array_layout == 'HERA-37':
ant_locs, ant_label = hexagon_generator(14.6, n_total=37)
elif array_layout == 'HERA-61':
ant_locs, ant_label = hexagon_generator(14.6, n_total=61)
elif array_layout == 'HERA-91':
ant_locs, ant_label = hexagon_generator(14.6, n_total=91)
elif array_layout == 'HERA-127':
ant_locs, ant_label = hexagon_generator(14.6, n_total=127)
elif array_layout == 'HERA-169':
ant_locs, ant_label = hexagon_generator(14.6, n_total=169)
elif array_layout == 'HERA-217':
ant_locs, ant_label = hexagon_generator(14.6, n_total=217)
elif array_layout == 'HERA-271':
ant_locs, ant_label = hexagon_generator(14.6, n_total=271)
elif array_layout == 'HERA-331':
ant_locs, ant_label = hexagon_generator(14.6, n_total=331)
elif array_layout == 'PAPER-64':
ant_locs, ant_label = rectangle_generator([30.0, 4.0], [8, 8])
elif array_layout == 'PAPER-112':
ant_locs, ant_label = rectangle_generator([15.0, 4.0], [16, 7])
elif array_layout == 'HIRAX-1024':
ant_locs, ant_label = rectangle_generator(7.0, n_side=32)
elif array_layout == 'CHIME':
ant_locs, ant_label = rectangle_generator([20.0, 0.3], [5, 256])
elif array_layout == 'CIRC':
ant_locs, ant_label = circular_antenna_array(element_size, minR, maxR=maxR)
ant_label = NP.asarray(ant_label)
if ant_locs.shape[1] == 2:
ant_locs = NP.hstack((ant_locs, NP.zeros(ant_label.size).reshape(-1,1)))
antpos_rstate = NP.random.RandomState(antpos_rms_seed)
deast = antpos_rms_tgtplane/NP.sqrt(2.0) * antpos_rstate.randn(ant_label.size)
dnorth = antpos_rms_tgtplane/NP.sqrt(2.0) * antpos_rstate.randn(ant_label.size)
dup = antpos_rms_elevation * antpos_rstate.randn(ant_label.size)
denu = NP.hstack((deast.reshape(-1,1), dnorth.reshape(-1,1), dup.reshape(-1,1)))
ant_locs = ant_locs + denu
ant_locs_orig = NP.copy(ant_locs)
ant_label_orig = NP.copy(ant_label)
ant_id = NP.arange(ant_label.size, dtype=int)
ant_id_orig = NP.copy(ant_id)
layout_info = {'positions': ant_locs_orig, 'labels': ant_label_orig, 'ids': ant_id_orig, 'coords': 'ENU'}
bl_orig, bl_label_orig, bl_id_orig = baseline_generator(ant_locs_orig, ant_label=ant_label_orig, ant_id=ant_id_orig, auto=False, conjugate=False)
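    # Fold all baseline orientations into the range -67.5 to 112.5 degrees by negating
    # vectors that fall outside it; a baseline and its negative correspond to conjugate
    # visibilities, so this only enforces a consistent orientation convention.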
blo = NP.angle(bl_orig[:,0] + 1j * bl_orig[:,1], deg=True)
neg_blo_ind = (blo < -67.5) | (blo > 112.5)
bl_orig[neg_blo_ind,:] = -1.0 * bl_orig[neg_blo_ind,:]
blo = | NP.angle(bl_orig[:,0] + 1j * bl_orig[:,1], deg=True) | numpy.angle |
import numpy as np
from flask import Flask,request,jsonify,render_template
import pickle
import pandas as pd
import datetime as dt
from datetime import datetime
from datetime import datetime as dt
app = Flask(__name__)
model = pickle.load(open('model.pkl','rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods =['POST'])
def predict():
option = request.form['options']
date = request.form['date']
dat = date
name = request.form['name']
d = dt.strptime(date, '%d-%m-%Y').date()
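    # toordinal() converts the parsed date to its proleptic Gregorian day count,
    # presumably the single numeric date feature the pickled model was trained on.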
date = d.toordinal()
date = | np.array(date) | numpy.array |
import numpy as np
import copy
def construct_preprocessor( trainingset, list_of_processors, **kwargs ):
combined_processor = lambda x: x
for entry in list_of_processors:
if type(entry) == tuple:
processor, processor_configuration = entry
assert type(processor_configuration) == dict, \
"The second argument to a preprocessor entry must be a dictionary of settings."
combined_processor = processor( combined_processor( trainingset ), **processor_configuration )
else:
processor = entry
combined_processor = processor( combined_processor( trainingset ))
return lambda dataset: combined_processor( copy.deepcopy( dataset ))
#end
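
def _example_construct_preprocessor( trainingset, testset ):
    # Illustrative usage sketch (not from the original module): chains several of the
    # preprocessors defined below. Entries may be bare functions or
    # (function, settings-dict) tuples, exactly as construct_preprocessor() expects.
    preprocessor = construct_preprocessor( trainingset, [
                        replace_nan,                    # swap NaNs for per-feature means
                        subtract_mean,                  # center every feature at zero
                        normalize,                      # scale by the standard deviation
                        ( whiten, {"epsilon": 1e-5} ),  # decorrelate the features
                    ])
    return preprocessor( trainingset ), preprocessor( testset )
#end
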
def standarize( trainingset ):
"""
Morph the input signal to a mean of 0 and scale the signal strength by
    dividing with the standard deviation (rather than forcing a [0, 1] range)
"""
def encoder( dataset ):
for instance in dataset:
if np.any(stds == 0):
nonzero_indexes = np.where(stds!=0)
instance.features[nonzero_indexes] = (instance.features[nonzero_indexes] - means[nonzero_indexes]) / stds[nonzero_indexes]
else:
instance.features = (instance.features - means) / stds
return dataset
#end
training_data = np.array( [instance.features for instance in trainingset ] )
means = training_data.mean(axis=0)
stds = training_data.std(axis=0)
return encoder
#end
def replace_nan( trainingset, replace_with = None ): # if replace_with = None, replaces with mean value
"""
    Replace instances of "not a number" with either the mean of the signal feature
    or a specific value assigned by `replace_with`
"""
training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 )
def encoder( dataset ):
for instance in dataset:
instance.features = instance.features.astype( np.float64 )
if np.sum(np.isnan( instance.features )):
if replace_with == None:
instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ]
else:
instance.features[ np.isnan( instance.features ) ] = replace_with
return dataset
#end
    if replace_with == None:
means = np.mean( np.nan_to_num(training_data), axis=0 )
return encoder
#end
def subtract_mean( trainingset ):
def encoder( dataset ):
for instance in dataset:
instance.features = instance.features - means
return dataset
#end
training_data = np.array( [instance.features for instance in trainingset ] )
means = training_data.mean(axis=0)
return encoder
#end
def normalize( trainingset ):
"""
    Scale the signal strength of every feature by dividing with its standard
    deviation. Unlike standarize(), the mean is left unchanged.
"""
def encoder( dataset ):
for instance in dataset:
if np.any(stds == 0):
nonzero_indexes = np.where(stds!=0)
instance.features[nonzero_indexes] = instance.features[nonzero_indexes] / stds[nonzero_indexes]
else:
instance.features = instance.features / stds
return dataset
#end
training_data = np.array( [instance.features for instance in trainingset ] )
stds = training_data.std(axis=0)
return encoder
#end
def whiten( trainingset, epsilon = 1e-5 ):
training_data = np.array( [instance.features for instance in trainingset ] )
def encoder(dataset):
for instance in dataset:
instance.features = np.dot(instance.features, W)
return dataset
#end
covariance = | np.dot(training_data.T, training_data) | numpy.dot |
"""
Spline fit 3D 4 channel example. Can for example be used in 4Pi-Storm microscopy.
Requires pyGpufit, pyGpuSpline (https://github.com/gpufit/Gpuspline), Numpy and Matplotlib.
"""
import numpy as np
from matplotlib import pyplot as plt
import pygpuspline.gpuspline as gs
import pygpufit.gpufit as gf
import misc
def merge_channels(x):
"""
    Takes an NxMx4 array and returns a 2Nx2M array by stacking the 4 2D images in a 2x2 grid.
:param x: Input array
:return: Output array
"""
return np.vstack((np.hstack((x[:, :, 0], x[:, :, 1])), np.hstack((x[:, :, 2], x[:, :, 3]))))
def calculate_psf(size_x, size_y, size_z, p, n_channels):
"""
calculate psf
:param size_x:
:param size_y:
:param size_z:
:param p:
:param n_channels:
:return:
"""
s_max = p[4] * 5
s_min = p[4] / 5
sx = np.linspace(s_max, s_min, size_z)
sy = np.linspace(s_min, s_max, size_z)
sz = 8
delta_s = sx[0] - sx[1]
    # extend the width arrays so sx[zi + ch] / sy[zi + ch] below stay in range for every channel
    sx = np.append(sx, [s_min - 1 * delta_s, s_min - 2 * delta_s, s_min - 3 * delta_s])
    sy = np.append(sy, [s_max + 1 * delta_s, s_max + 2 * delta_s, s_max + 3 * delta_s])
x = np.linspace(0, np.pi, size_z)
a = np.zeros((x.size, 4))
a[:, 0] = np.sin(x + np.pi * 0 / 4) * 0.5 + 0.5
a[:, 1] = np.sin(x + np.pi * 1 / 4) * 0.5 + 0.5
a[:, 2] = np.sin(x + np.pi * 2 / 4) * 0.5 + 0.5
a[:, 3] = np.sin(x + np.pi * 3 / 4) * 0.5 + 0.5
f = np.zeros((size_x, size_y, size_z, n_channels), dtype=np.float32)
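    # Evaluate the PSF voxel by voxel: an elliptical Gaussian whose x/y widths
    # (sx, sy) change with z-slice and channel (astigmatism), a Gaussian envelope
    # along z, and the per-channel sine weights in `a` modulating the amplitude.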
for ch in range(n_channels):
for xi in range(size_x):
for yi in range(size_y):
for zi in range(size_z):
arg_x = np.exp(-1 / 2 * ((xi - p[1]) / sx[zi + ch])**2)
arg_y = np.exp(-1 / 2 * ((yi - p[2]) / sy[zi + ch])**2)
arg_z = np.exp(-1 / 2 * ((zi + ch - p[3]) / sz)**2)
f[xi, yi, zi, ch] = a[zi, ch] * p[0] * arg_x * arg_y * arg_z + p[5]
return f
def spline_values_3d_multichannel(coefficients, n_intervals, n_channels, p):
"""
:param coefficients:
:param n_intervals:
:param n_channels:
:param p:
:return:
"""
coefficients = np.reshape(coefficients, (64, n_intervals[0], n_intervals[1], n_intervals[2], n_channels))
x = np.arange(n_intervals[0])-p[1]
y = np.arange(n_intervals[1])-p[2]
z = -p[3]
f = p[0] * gs.spline_values(coefficients, x, y, z) + p[4]
return f
if __name__ == '__main__':
# initialize random number generator
rng = np.random.default_rng(0)
# data size
size_x = 19
size_y = 25
size_z = 50
n_channels = 4
# tolerances
tolerance = 1e-30
max_n_iterations = 200
estimator_id = gf.EstimatorID.LSE
# derived values
x = np.arange(size_x, dtype=np.float32)
y = np.arange(size_y, dtype=np.float32)
z = np.arange(size_z, dtype=np.float32)
# generate PSF
psf_parameters = np.array([100, (size_x - 1) / 2 + 1, (size_y - 1) / 2 - 1, (size_z - 1) / 2, 1, 10], dtype=np.float32)
psf = calculate_psf(size_x, size_y, size_z, psf_parameters, n_channels)
z_slice_index = 25
# add noise
snr = 5
amplitude = psf_parameters[0]
noise_std_dev = amplitude / (snr * | np.log(10.0) | numpy.log |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
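    # Poll the status file every 3 seconds, up to 400 tries (~20 minutes), until the
    # server has finished preparing the requested data.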
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                elapsed = (i * 3) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The links of the matching data files are collected and returned for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the NetCDF data files in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
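    # The first variable is time in seconds since 1900-01-01: convert it to days and
    # let pandas build datetimes from the matching 1900-01-01 origin.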
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
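
def _example_m2m_workflow():
    # Illustrative end-to-end sketch (not from the original script). The reference
    # designator, date strings and regex tag below are placeholder assumptions; any
    # stream and ISO-8601 date range accepted by the M2M API could be substituted.
    uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
    start_date = '2019-01-01T00:00:00.000Z'
    end_date = '2019-01-31T23:59:59.999Z'
    var_list = structtype()
    var_list[0].name = 'time'
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].name = 'fluorometric_chlorophyll_a'
    var_list[1].units = 'ug/L'
    data = M2M_Call(uframe_dataset_name, start_date, end_date)  # None if the request failed
    nclist = M2M_Files(data, tag=r'.*FLORT.*\.nc$')             # NetCDF files matching the tag
    variables, times = M2M_Data(nclist, var_list)               # download and concatenate
    return variables, times
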
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC - bio-acoustic sonar (echosounder)
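# The ZPLSC branches below pre-allocate only the time coordinate; no other
# variables are enumerated for these streams in this chain.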
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS - surface wave spectra (bulk wave statistics)
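# Each WAVSS branch below carries the bulk wave statistics from the
# buoy-mounted wave sensor: zero crossings, wave heights, wave periods,
# mean direction, and directional spread.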
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT - single-point velocity meter
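# VELPT branches report point velocities in m/s plus attitude, temperature,
# and pressure in raw instrument counts (deci-degrees, 0.01 degC, 0.001 dbar),
# as reflected in the units strings assigned below.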
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W - pCO2 water
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - pumped CTD
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D single-point velocity meter (seafloor frames)
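# VEL3D branches (all on the MFN seafloor frames) carry the three turbulent
# velocity components plus pressure reported in 0.001 dbar counts.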
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK - 3-D single-point velocity meter (wire-following profiler)
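# VEL3D-K is the profiler-mounted variant; in addition to the velocity
# components it carries heading/pitch/roll and the pressure from the
# co-located CTD (int_ctd_pressure).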
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A - pCO2 air-sea
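# PCO2A branches report surface-seawater and atmospheric pCO2 together with
# the derived air-sea CO2 flux.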
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD - photosynthetically available radiation
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA - absorption/attenuation spectrophotometer
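# As with ZPLSC, only the time coordinate is pre-allocated for the OPTAA
# branches below; the multi-wavelength absorption/attenuation channels are
# not enumerated here.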
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
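# Nitrate sensor (SUNA); reports measured and salinity-corrected nitrate concentrations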
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
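# Buoy motion package (3-axis accelerometer); only the time variable is requested for these streams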
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
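# Bulk meteorology package: sea-surface T/C, winds, pressure, humidity, radiation, precipitation, and derived flux products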
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
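# Three-channel fluorometer: chlorophyll-a, CDOM, and optical backscatter / volume scattering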
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
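# Direct covariance flux package; only the time variable is requested for this stream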
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
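# Dissolved oxygen optode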
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
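# Acoustic Doppler current profiler; earth-referenced velocity profiles with bin depths and attitude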
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
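# Surface wave spectra instrument; bulk wave statistics (heights, periods, direction, spread)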
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
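# Single-point velocity meter; velocity components plus heading/pitch/roll, temperature, and pressure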
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W - seawater pCO2 (recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH (recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance (recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure and temperature (recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - moored CTD (recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - single-point turbulent velocity (recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - air-sea pCO2 and CO2 flux (recovered-host streams)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - optical absorption and attenuation (recovered-host streams, time only)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate concentration (recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
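#CTDBP - moored CTD (recovered-instrument streams)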
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
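#CTDPF - wire-following profiler CTD (recovered WFP stream)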
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
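#ADCP - acoustic Doppler current profiler (recovered-instrument streams)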
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
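#ZPLSC - bio-acoustic sonar echogram data (recovered-instrument streams, time only)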
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
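#VELPT - single-point velocity meter (recovered-instrument streams)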
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
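#VEL3D 3-D turbulent point velocity data below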
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
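#PRESF seafloor pressure (tide) data (recovered instrument) below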
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
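#PHSEN seawater pH data (recovered instrument) below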
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
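#PCO2W seawater pCO2 data (recovered instrument) below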
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
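#PARAD photosynthetically active radiation (PAR) data below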
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
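#NUTNR nitrate data (recovered instrument) below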
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
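#FDCHP flux data (recovered instrument, time only) below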
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
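#FLORT fluorometer data (chlorophyll, CDOM, optical backscatter) below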
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
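#DOSTA dissolved oxygen data below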
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
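#WAVSS_Stats wave statistics (from the ADCPT) below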
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
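#Streamed data from the cabled benthic experiment packages (BEP nodes) below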
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
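# CSPP profiler PARAD (photosynthetically active radiation) streams (CE01ISSP / CE06ISSP)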
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
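# CSPP profiler NUTNR (nitrate) streams, recovered CSPP only (CE01ISSP / CE06ISSP)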
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
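# CSPP profiler SPKIR (downwelling spectral irradiance) streams (CE01ISSP / CE06ISSP)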
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
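# CSPP profiler VELPT (point velocity) streams (CE01ISSP / CE06ISSP):
# velocity components plus heading, roll, pitch, temperature, and pressure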
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
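# CSPP profiler OPTAA streams, recovered CSPP only (CE01ISSP / CE06ISSP); only time and pressure are filled here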
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
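# Shelf surface-piercing profiler (CE02SHSP / CE07SHSP) recovered CSPP streams, starting with FLORT fluorometer data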
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
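# CE02SHSP / CE07SHSP DOSTA oxygen, CTD, PARAD, NUTNR, SPKIR, VELPT, and OPTAA recovered CSPP streams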
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
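# Coastal Endurance glider (CE05MOAS) CTD streams; glider records also carry lat/lon positions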
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
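# Coastal Endurance glider (CE05MOAS) DOSTA oxygen streams (telemetered and recovered host)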
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
# Select and shuffle a random subset of available data, and apply data augmentation techniques.
# I tried adding a separate thread to fill a queue of unzipped spectrograms,
# but it didn't actually improve performance.
import random
import sys
import numpy as np
import skimage
from skimage import filters
from core import audio
from core import constants
from core import util
from core import plot
PROB_MERGE = 0.2 # probability of merging to train multi-label support
PROB_AUG = 0.55 # probability of augmentation
CACHE_LEN = 1000 # cache this many noise specs for performance
MAX_SHIFT = 5 # max pixels for horizontal shift
NOISE_VARIANCE = 0.0015 # larger variances lead to more noise
SPECKLE_VARIANCE = .009
# relative frequencies of the augmentation types
BLUR_INDEX = 0 # so FREQS[0] is relative frequency of blur
FADE_INDEX = 1
LOW_INDEX = 2
NOISE_INDEX = 3
SHIFT_INDEX = 4
SPECKLE_INDEX = 5
FREQS = [0.25, 0.5, 0.2, 1.0, 0.5, 0.9]
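# As a worked example of how these relative frequencies behave once normalized:
# their sum is 0.25 + 0.5 + 0.2 + 1.0 + 0.5 + 0.9 = 3.35, so the per-type
# probabilities are roughly blur 0.075, fade 0.149, low 0.060, noise 0.299,
# shift 0.149 and speckle 0.269 (summing to 1 up to rounding), which makes added
# noise the most likely augmentation type.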
class DataGenerator():
def __init__(self, x_train, y_train, seed=None, augmentation=True, binary_classifier=False, multilabel=False):
self.x_train = x_train
self.y_train = y_train
self.seed = seed
self.augmentation = augmentation
self.binary_classifier = binary_classifier
self.multilabel = multilabel
if binary_classifier:
self.spec_height = constants.BINARY_SPEC_HEIGHT
else:
self.spec_height = constants.SPEC_HEIGHT
self.indices = np.arange(y_train.shape[0])
if self.augmentation:
# convert relative frequencies to probability ranges in [0, 1]
freqs = np.array(FREQS)
sum = np.sum(freqs)
probs = freqs / sum
self.probs = np.zeros(SPECKLE_INDEX + 1)
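# --- Illustrative sketch, not part of the original file ---
# The source is truncated at this point, so the helper below is an assumption
# rather than the author's implementation: it shows one way the normalized
# frequencies in FREQS can be turned into cumulative thresholds so that a single
# uniform draw selects an augmentation type. The name pick_augmentation_index is
# hypothetical.
def pick_augmentation_index(freqs, rng=None):
    # normalize the relative frequencies, then build cumulative thresholds in [0, 1]
    rng = np.random.default_rng() if rng is None else rng
    probs = np.array(freqs, dtype=float)
    probs /= probs.sum()
    thresholds = np.cumsum(probs)
    # a single uniform draw lands in exactly one threshold interval
    draw = rng.random()
    idx = int(np.searchsorted(thresholds, draw))
    return min(idx, len(thresholds) - 1)  # guard the floating-point edge at 1.0

# Example usage: pick_augmentation_index(FREQS) returns NOISE_INDEX (3) roughly
# 30% of the time, matching its relative frequency of 1.0 out of 3.35.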